4 * SH4 => x86 translation. This version does no real optimization; it just
5 * outputs straight-line x86 code. It mainly exists to provide a baseline
6 * to test the optimizing versions against.
8 * Copyright (c) 2007 Nathan Keynes.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
28 #include "sh4/xltcache.h"
29 #include "sh4/sh4core.h"
30 #include "sh4/sh4trans.h"
31 #include "sh4/sh4mmio.h"
32 #include "sh4/x86op.h"
35 #define DEFAULT_BACKPATCH_SIZE 4096
37 struct backpatch_record {
39 uint32_t fixup_icount;
43 #define MAX_RECOVERY_SIZE 2048
50 * Struct to manage internal translation state. This state is not saved -
51 * it is only valid between calls to sh4_translate_begin_block() and
52 * sh4_translate_end_block()
54 struct sh4_x86_state {
56 gboolean priv_checked; /* true if we've already checked the cpu mode. */
57 gboolean fpuen_checked; /* true if we've already checked fpu enabled. */
58 gboolean branch_taken; /* true if we branched unconditionally */
59 uint32_t block_start_pc;
60 uint32_t stack_posn; /* Trace stack height for alignment purposes */
64 gboolean tlb_on; /* True if tlb translation is active */
66 /* Allocated memory for the (block-wide) back-patch list */
67 struct backpatch_record *backpatch_list;
68 uint32_t backpatch_posn;
69 uint32_t backpatch_size;
70 struct xlat_recovery_record recovery_list[MAX_RECOVERY_SIZE];
71 uint32_t recovery_posn;
74 #define TSTATE_NONE -1
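/* The non-NONE tstate values mirror the x86 condition-code numbering, so
 * 0x70+tstate below is the short-form Jcc opcode for "T set" and tstate^1
 * is the negated condition (x86 condition pairs differ only in the low bit).
 * This is what lets JT/JF reuse the flags of the last SH4 comparison instead
 * of re-reading sh4r.t. (Inferred from the macros below.) */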
84 /** Branch if T is set (either in the current cflags, or in sh4r.t) */
85 #define JT_rel8(rel8,label) if( sh4_x86.tstate == TSTATE_NONE ) { \
86 CMP_imm8s_sh4r( 1, R_T ); sh4_x86.tstate = TSTATE_E; } \
87 OP(0x70+sh4_x86.tstate); OP(rel8); \
89 /** Branch if T is clear (either in the current cflags or in sh4r.t) */
90 #define JF_rel8(rel8,label) if( sh4_x86.tstate == TSTATE_NONE ) { \
91 CMP_imm8s_sh4r( 1, R_T ); sh4_x86.tstate = TSTATE_E; } \
92 OP(0x70+ (sh4_x86.tstate^1)); OP(rel8); \
95 static struct sh4_x86_state sh4_x86;
97 static uint32_t max_int = 0x7FFFFFFF;
98 static uint32_t min_int = 0x80000000;
99 static uint32_t save_fcw; /* save value for fpu control word */
100 static uint32_t trunc_fcw = 0x0F7F; /* fcw value for truncation mode */
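/* Sketch of the intent here (standard x87 behaviour): FCW bits 11:10 are the
 * rounding-control field, and 0x0F7F sets them to 11b = round-toward-zero
 * with all FP exceptions masked, which matches SH4 FTRC truncation. The
 * caller's control word is stashed in save_fcw while truncation is active. */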
104 sh4_x86.backpatch_list = malloc(DEFAULT_BACKPATCH_SIZE);
105 sh4_x86.backpatch_size = DEFAULT_BACKPATCH_SIZE / sizeof(struct backpatch_record);
109 static void sh4_x86_add_backpatch( uint8_t *fixup_addr, uint32_t fixup_pc, uint32_t exc_code )
111 if( sh4_x86.backpatch_posn == sh4_x86.backpatch_size ) {
112 sh4_x86.backpatch_size <<= 1;
113 sh4_x86.backpatch_list = realloc( sh4_x86.backpatch_list,
114 sh4_x86.backpatch_size * sizeof(struct backpatch_record));
115 assert( sh4_x86.backpatch_list != NULL );
117 if( sh4_x86.in_delay_slot ) {
120 sh4_x86.backpatch_list[sh4_x86.backpatch_posn].fixup_addr = (uint32_t *)fixup_addr;
121 sh4_x86.backpatch_list[sh4_x86.backpatch_posn].fixup_icount = (fixup_pc - sh4_x86.block_start_pc)>>1;
122 sh4_x86.backpatch_list[sh4_x86.backpatch_posn].exc_code = exc_code;
123 sh4_x86.backpatch_posn++;
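/* A minimal sketch (hypothetical, not code from this file) of how the block
 * epilogue could consume these records -- each entry names a jump to point at
 * an exception exit, plus enough information to recover the faulting SH4 PC:
 *
 *   for( unsigned i = 0; i < sh4_x86.backpatch_posn; i++ ) {
 *       struct backpatch_record *r = &sh4_x86.backpatch_list[i];
 *       uint32_t sh4_pc = sh4_x86.block_start_pc + (r->fixup_icount << 1);
 *       // ... patch *r->fixup_addr to reach code raising r->exc_code at sh4_pc
 *   }
 */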
126 void sh4_x86_add_recovery( uint32_t pc )
128 xlat_recovery[xlat_recovery_posn].xlat_pc = (uintptr_t)xlat_output;
129 xlat_recovery[xlat_recovery_posn].sh4_icount = (pc - sh4_x86.block_start_pc)>>1;
130 xlat_recovery_posn++;
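/* Each recovery record pairs the current native output address with the SH4
 * instruction count from the block start, so a fault in generated code can be
 * mapped back to a precise guest PC: sh4_pc = block_start_pc + sh4_icount*2
 * (SH4 instructions are 2 bytes each, hence the >>1 above). */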
134 * Emit an instruction to load an SH4 reg into a real register
136 static inline void load_reg( int x86reg, int sh4reg )
138 /* mov [bp+n], reg */
140 OP(0x45 + (x86reg<<3));
141 OP(REG_OFFSET(r[sh4reg]));
144 static inline void load_reg16s( int x86reg, int sh4reg )
148 MODRM_r32_sh4r(x86reg, REG_OFFSET(r[sh4reg]));
151 static inline void load_reg16u( int x86reg, int sh4reg )
155 MODRM_r32_sh4r(x86reg, REG_OFFSET(r[sh4reg]));
159 #define load_spreg( x86reg, regoff ) MOV_sh4r_r32( regoff, x86reg )
160 #define store_spreg( x86reg, regoff ) MOV_r32_sh4r( x86reg, regoff )
162 * Emit an instruction to load an immediate value into a register
164 static inline void load_imm32( int x86reg, uint32_t value ) {
165 /* mov #value, reg */
171 * Load an immediate 64-bit quantity (note: x86-64 only)
173 static inline void load_imm64( int x86reg, uint64_t value ) {
174 /* mov #value, reg */
182 * Emit an instruction to store an SH4 reg (RN)
184 static inline void store_reg( int x86reg, int sh4reg ) {
185 /* mov reg, [bp+n] */
187 OP(0x45 + (x86reg<<3));
188 OP(REG_OFFSET(r[sh4reg]));
191 #define load_fr_bank(bankreg) load_spreg( bankreg, REG_OFFSET(fr_bank))
194 * Load an FR register (single-precision floating point) into an integer x86
195 * register (eg for register-to-register moves)
197 static inline void load_fr( int bankreg, int x86reg, int frm )
199 OP(0x8B); OP(0x40+bankreg+(x86reg<<3)); OP((frm^1)<<2);
203 * Store an FR register (single-precision floating point) into an integer x86
204 * register (eg for register-to-register moves)
206 static inline void store_fr( int bankreg, int x86reg, int frn )
208 OP(0x89); OP(0x40+bankreg+(x86reg<<3)); OP((frn^1)<<2);
213 * Load a pointer to the back fp bank into the specified x86 register. The
214 * bankreg must have been previously loaded with FPSCR.
217 static inline void load_xf_bank( int bankreg )
220 SHR_imm8_r32( (21 - 6), bankreg ); // Extract bit 21 then *64 for bank size
221 AND_imm8s_r32( 0x40, bankreg ); // Complete extraction
222 OP(0x8D); OP(0x44+(bankreg<<3)); OP(0x28+bankreg); OP(REG_OFFSET(fr)); // LEA [ebp+bankreg+disp], bankreg
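/* Worked example of the extraction above: FPSCR.FR is bit 21, and one bank of
 * 16 single-precision registers is 16*4 = 64 bytes. SHR by (21-6)=15 moves
 * bit 21 down to bit 6 (value 64), AND 0x40 isolates it, so the register now
 * holds the byte offset (0 or 64) of the wanted bank, and the LEA folds base,
 * offset and displacement into the final bank pointer in one instruction. */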
226 * Update the fr_bank pointer based on the current fpscr value.
228 static inline void update_fr_bank( int fpscrreg )
230 SHR_imm8_r32( (21 - 6), fpscrreg ); // Extract bit 21 then *64 for bank size
231 AND_imm8s_r32( 0x40, fpscrreg ); // Complete extraction
232 OP(0x8D); OP(0x44+(fpscrreg<<3)); OP(0x28+fpscrreg); OP(REG_OFFSET(fr)); // LEA [ebp+fpscrreg+disp], fpscrreg
233 store_spreg( fpscrreg, REG_OFFSET(fr_bank) );
236 * Push FPUL (as a 32-bit float) onto the FPU stack
238 static inline void push_fpul( )
240 OP(0xD9); OP(0x45); OP(R_FPUL);
244 * Pop FPUL (as a 32-bit float) from the FPU stack
246 static inline void pop_fpul( )
248 OP(0xD9); OP(0x5D); OP(R_FPUL);
252 * Push a 32-bit float onto the FPU stack, with bankreg previously loaded
253 * with the location of the current fp bank.
255 static inline void push_fr( int bankreg, int frm )
257 OP(0xD9); OP(0x40 + bankreg); OP((frm^1)<<2); // FLD.S [bankreg + frm^1*4]
261 * Pop a 32-bit float from the FPU stack and store it back into the fp bank,
262 * with bankreg previously loaded with the location of the current fp bank.
264 static inline void pop_fr( int bankreg, int frm )
266 OP(0xD9); OP(0x58 + bankreg); OP((frm^1)<<2); // FSTP.S [bankreg + frm^1*4]
270 * Push a 64-bit double onto the FPU stack, with bankreg previously loaded
271 * with the location of the current fp bank.
273 static inline void push_dr( int bankreg, int frm )
275 OP(0xDD); OP(0x40 + bankreg); OP(frm<<2); // FLD.D [bankreg + frm*4]
278 static inline void pop_dr( int bankreg, int frm )
280 OP(0xDD); OP(0x58 + bankreg); OP(frm<<2); // FSTP.D [bankreg + frm*4]
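/* Note the addressing difference between the helpers: singles use (frm^1)<<2
 * while doubles use frm<<2. This suggests each pair of FR slots is stored
 * word-swapped so that an even/odd pair reads as one little-endian double,
 * with the ^1 compensating when a single is accessed on its own -- an
 * inference from the offsets used here, not a documented guarantee. */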
283 /* Exception checks - Note that all exception checks will clobber EAX */
285 #define check_priv( ) \
286 if( !sh4_x86.priv_checked ) { \
287 sh4_x86.priv_checked = TRUE;\
288 load_spreg( R_EAX, R_SR );\
289 AND_imm32_r32( SR_MD, R_EAX );\
290 if( sh4_x86.in_delay_slot ) {\
291 JE_exc( EXC_SLOT_ILLEGAL );\
293 JE_exc( EXC_ILLEGAL );\
297 #define check_fpuen( ) \
298 if( !sh4_x86.fpuen_checked ) {\
299 sh4_x86.fpuen_checked = TRUE;\
300 load_spreg( R_EAX, R_SR );\
301 AND_imm32_r32( SR_FD, R_EAX );\
302 if( sh4_x86.in_delay_slot ) {\
303 JNE_exc(EXC_SLOT_FPU_DISABLED);\
305 JNE_exc(EXC_FPU_DISABLED);\
309 #define check_ralign16( x86reg ) \
310 TEST_imm32_r32( 0x00000001, x86reg ); \
311 JNE_exc(EXC_DATA_ADDR_READ)
313 #define check_walign16( x86reg ) \
314 TEST_imm32_r32( 0x00000001, x86reg ); \
315 JNE_exc(EXC_DATA_ADDR_WRITE)
317 #define check_ralign32( x86reg ) \
318 TEST_imm32_r32( 0x00000003, x86reg ); \
319 JNE_exc(EXC_DATA_ADDR_READ)
321 #define check_walign32( x86reg ) \
322 TEST_imm32_r32( 0x00000003, x86reg ); \
323 JNE_exc(EXC_DATA_ADDR_WRITE)
326 #define MEM_RESULT(value_reg) if(value_reg != R_EAX) { MOV_r32_r32(R_EAX,value_reg); }
327 #define MEM_READ_BYTE( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); MEM_RESULT(value_reg)
328 #define MEM_READ_WORD( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); MEM_RESULT(value_reg)
329 #define MEM_READ_LONG( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); MEM_RESULT(value_reg)
330 #define MEM_WRITE_BYTE( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg)
331 #define MEM_WRITE_WORD( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg)
332 #define MEM_WRITE_LONG( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg)
335 * Perform MMU translation on the address in addr_reg for a read operation, if the TLB is turned
336 * on; otherwise do nothing. Clobbers EAX, ECX and EDX. May raise a TLB exception or address error.
338 #define MMU_TRANSLATE_READ( addr_reg ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); MEM_RESULT(addr_reg); }
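/* Typical read sequence built from these pieces (taken from the patterns
 * later in this file): compute the VMA in EAX, check alignment, translate,
 * then dispatch the access --
 *
 *   load_reg( R_EAX, Rm );
 *   check_ralign32( R_EAX );      // may raise an address error
 *   MMU_TRANSLATE_READ( R_EAX );  // VMA -> PMA when the TLB is on
 *   MEM_READ_LONG( R_EAX, R_EAX );
 *
 * The JE_exc(-1) appears to mean "the exception was already raised by the
 * MMU call itself"; the branch is backpatched to the exception exit. */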
340 * Perform MMU translation on the address in addr_reg for a write operation, if the TLB is turned
341 * on; otherwise do nothing. Clobbers EAX, ECX and EDX. May raise a TLB exception or address error.
343 #define MMU_TRANSLATE_WRITE( addr_reg ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_write, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); MEM_RESULT(addr_reg); }
345 #define MEM_READ_SIZE (CALL_FUNC1_SIZE)
346 #define MEM_WRITE_SIZE (CALL_FUNC2_SIZE)
347 #define MMU_TRANSLATE_SIZE (sh4_x86.tlb_on ? (CALL_FUNC1_SIZE + 12) : 0 )
349 #define SLOTILLEGAL() JMP_exc(EXC_SLOT_ILLEGAL); sh4_x86.in_delay_slot = DELAY_NONE; return 1;
351 /****** Import appropriate calling conventions ******/
352 #if SH4_TRANSLATOR == TARGET_X86_64
353 #include "sh4/ia64abi.h"
354 #else /* SH4_TRANSLATOR == TARGET_X86 */
356 #include "sh4/ia32mac.h"
358 #include "sh4/ia32abi.h"
363 * Embed a breakpoint into the generated code
365 void sh4_translate_emit_breakpoint( sh4vma_t pc )
367 load_imm32( R_EAX, XLAT_EXIT_BREAKPOINT );
368 call_func1( sh4_translate_exit, R_EAX );
372 * Embed a call to sh4_execute_instruction for situations that we
373 * can't translate (mainly page-crossing delay slots at the moment).
374 * Caller is responsible for setting new_pc.
376 void sh4_emulator_exit( sh4vma_t endpc )
378 load_imm32( R_ECX, endpc - sh4_x86.block_start_pc ); // 5
379 ADD_r32_sh4r( R_ECX, R_PC );
381 load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
382 ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
383 load_imm32( R_ECX, sh4_x86.in_delay_slot ? 1 : 0 );
384 store_spreg( R_ECX, REG_OFFSET(in_delay_slot) );
386 call_func0( sh4_execute_instruction );
387 load_spreg( R_EAX, R_PC );
388 if( sh4_x86.tlb_on ) {
389 call_func1(xlat_get_code_by_vma,R_EAX);
391 call_func1(xlat_get_code,R_EAX);
393 AND_imm8s_r32( 0xFC, R_EAX ); // 3
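/* imm8s sign-extends 0xFC to 0xFFFFFFFC, so this clears just the low two
 * bits of the code pointer returned by xlat_get_code* in 3 bytes of code. */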
399 * Translate a single instruction. Delayed branches are handled specially
400 * by translating both branch and delayed instruction as a single unit (as far as possible).
402 * The instruction MUST be in the icache (assert check)
404 * @return true if the instruction marks the end of a basic block
407 uint32_t sh4_translate_instruction( sh4vma_t pc )
410 /* Read instruction from icache */
411 assert( IS_IN_ICACHE(pc) );
412 ir = *(uint16_t *)GET_ICACHE_PTR(pc);
414 /* PC is not in the current icache - this usually means we're running
415 * with MMU on, and we've gone past the end of the page. And since
416 * sh4_translate_block is pretty careful about this, it means we're
417 * almost certainly in a delay slot.
419 * Since we can't assume the page is present (and we can't fault it in
420 * at this point), inline a call to sh4_execute_instruction (with a few
421 * small repairs to cope with the different environment).
424 if( !sh4_x86.in_delay_slot ) {
425 sh4_x86_add_recovery(pc);
427 switch( (ir&0xF000) >> 12 ) {
431 switch( (ir&0x80) >> 7 ) {
433 switch( (ir&0x70) >> 4 ) {
436 uint32_t Rn = ((ir>>8)&0xF);
438 call_func0(sh4_read_sr);
439 store_reg( R_EAX, Rn );
440 sh4_x86.tstate = TSTATE_NONE;
445 uint32_t Rn = ((ir>>8)&0xF);
446 load_spreg( R_EAX, R_GBR );
447 store_reg( R_EAX, Rn );
452 uint32_t Rn = ((ir>>8)&0xF);
454 load_spreg( R_EAX, R_VBR );
455 store_reg( R_EAX, Rn );
456 sh4_x86.tstate = TSTATE_NONE;
461 uint32_t Rn = ((ir>>8)&0xF);
463 load_spreg( R_EAX, R_SSR );
464 store_reg( R_EAX, Rn );
465 sh4_x86.tstate = TSTATE_NONE;
470 uint32_t Rn = ((ir>>8)&0xF);
472 load_spreg( R_EAX, R_SPC );
473 store_reg( R_EAX, Rn );
474 sh4_x86.tstate = TSTATE_NONE;
483 { /* STC Rm_BANK, Rn */
484 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm_BANK = ((ir>>4)&0x7);
486 load_spreg( R_EAX, REG_OFFSET(r_bank[Rm_BANK]) );
487 store_reg( R_EAX, Rn );
488 sh4_x86.tstate = TSTATE_NONE;
494 switch( (ir&0xF0) >> 4 ) {
497 uint32_t Rn = ((ir>>8)&0xF);
498 if( sh4_x86.in_delay_slot ) {
501 load_spreg( R_EAX, R_PC );
502 ADD_imm32_r32( pc + 4 - sh4_x86.block_start_pc, R_EAX );
503 store_spreg( R_EAX, R_PR );
504 ADD_sh4r_r32( REG_OFFSET(r[Rn]), R_EAX );
505 store_spreg( R_EAX, R_NEW_PC );
507 sh4_x86.tstate = TSTATE_NONE;
508 sh4_translate_instruction( pc + 2 );
509 exit_block_newpcset(pc+2);
510 sh4_x86.branch_taken = TRUE;
517 uint32_t Rn = ((ir>>8)&0xF);
518 if( sh4_x86.in_delay_slot ) {
521 load_spreg( R_EAX, R_PC );
522 ADD_imm32_r32( pc + 4 - sh4_x86.block_start_pc, R_EAX );
523 ADD_sh4r_r32( REG_OFFSET(r[Rn]), R_EAX );
524 store_spreg( R_EAX, R_NEW_PC );
525 sh4_x86.in_delay_slot = DELAY_PC;
526 sh4_x86.tstate = TSTATE_NONE;
527 sh4_translate_instruction( pc + 2 );
528 exit_block_newpcset(pc+2);
529 sh4_x86.branch_taken = TRUE;
536 uint32_t Rn = ((ir>>8)&0xF);
537 load_reg( R_EAX, Rn );
538 MOV_r32_r32( R_EAX, R_ECX );
539 AND_imm32_r32( 0xFC000000, R_EAX );
540 CMP_imm32_r32( 0xE0000000, R_EAX );
541 JNE_rel8(8+CALL_FUNC1_SIZE, end);
542 call_func1( sh4_flush_store_queue, R_ECX );
543 TEST_r32_r32( R_EAX, R_EAX );
546 sh4_x86.tstate = TSTATE_NONE;
551 uint32_t Rn = ((ir>>8)&0xF);
556 uint32_t Rn = ((ir>>8)&0xF);
561 uint32_t Rn = ((ir>>8)&0xF);
565 { /* MOVCA.L R0, @Rn */
566 uint32_t Rn = ((ir>>8)&0xF);
567 load_reg( R_EAX, Rn );
568 check_walign32( R_EAX );
569 MMU_TRANSLATE_WRITE( R_EAX );
570 load_reg( R_EDX, 0 );
571 MEM_WRITE_LONG( R_EAX, R_EDX );
572 sh4_x86.tstate = TSTATE_NONE;
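/* This is the canonical store template used throughout the translator:
 * EAX = effective address, alignment check, MMU translate (EAX becomes the
 * physical address), EDX = value, then MEM_WRITE_*. The value is loaded only
 * after translation because the translate step may clobber ECX and EDX. */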
581 { /* MOV.B Rm, @(R0, Rn) */
582 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
583 load_reg( R_EAX, 0 );
584 load_reg( R_ECX, Rn );
585 ADD_r32_r32( R_ECX, R_EAX );
586 MMU_TRANSLATE_WRITE( R_EAX );
587 load_reg( R_EDX, Rm );
588 MEM_WRITE_BYTE( R_EAX, R_EDX );
589 sh4_x86.tstate = TSTATE_NONE;
593 { /* MOV.W Rm, @(R0, Rn) */
594 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
595 load_reg( R_EAX, 0 );
596 load_reg( R_ECX, Rn );
597 ADD_r32_r32( R_ECX, R_EAX );
598 check_walign16( R_EAX );
599 MMU_TRANSLATE_WRITE( R_EAX );
600 load_reg( R_EDX, Rm );
601 MEM_WRITE_WORD( R_EAX, R_EDX );
602 sh4_x86.tstate = TSTATE_NONE;
606 { /* MOV.L Rm, @(R0, Rn) */
607 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
608 load_reg( R_EAX, 0 );
609 load_reg( R_ECX, Rn );
610 ADD_r32_r32( R_ECX, R_EAX );
611 check_walign32( R_EAX );
612 MMU_TRANSLATE_WRITE( R_EAX );
613 load_reg( R_EDX, Rm );
614 MEM_WRITE_LONG( R_EAX, R_EDX );
615 sh4_x86.tstate = TSTATE_NONE;
620 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
621 load_reg( R_EAX, Rm );
622 load_reg( R_ECX, Rn );
624 store_spreg( R_EAX, R_MACL );
625 sh4_x86.tstate = TSTATE_NONE;
629 switch( (ir&0xFF0) >> 4 ) {
634 sh4_x86.tstate = TSTATE_C;
641 sh4_x86.tstate = TSTATE_C;
646 XOR_r32_r32(R_EAX, R_EAX);
647 store_spreg( R_EAX, R_MACL );
648 store_spreg( R_EAX, R_MACH );
649 sh4_x86.tstate = TSTATE_NONE;
654 call_func0( MMU_ldtlb );
661 sh4_x86.tstate = TSTATE_C;
668 sh4_x86.tstate = TSTATE_C;
677 switch( (ir&0xF0) >> 4 ) {
680 /* Do nothing. Well, we could emit a NOP (0x90), but what would really be the point? */
685 XOR_r32_r32( R_EAX, R_EAX );
686 store_spreg( R_EAX, R_Q );
687 store_spreg( R_EAX, R_M );
688 store_spreg( R_EAX, R_T );
689 sh4_x86.tstate = TSTATE_C; // works for DIV1
694 uint32_t Rn = ((ir>>8)&0xF);
695 load_spreg( R_EAX, R_T );
696 store_reg( R_EAX, Rn );
705 switch( (ir&0xF0) >> 4 ) {
708 uint32_t Rn = ((ir>>8)&0xF);
709 load_spreg( R_EAX, R_MACH );
710 store_reg( R_EAX, Rn );
715 uint32_t Rn = ((ir>>8)&0xF);
716 load_spreg( R_EAX, R_MACL );
717 store_reg( R_EAX, Rn );
722 uint32_t Rn = ((ir>>8)&0xF);
723 load_spreg( R_EAX, R_PR );
724 store_reg( R_EAX, Rn );
729 uint32_t Rn = ((ir>>8)&0xF);
731 load_spreg( R_EAX, R_SGR );
732 store_reg( R_EAX, Rn );
733 sh4_x86.tstate = TSTATE_NONE;
738 uint32_t Rn = ((ir>>8)&0xF);
739 load_spreg( R_EAX, R_FPUL );
740 store_reg( R_EAX, Rn );
744 { /* STS FPSCR, Rn */
745 uint32_t Rn = ((ir>>8)&0xF);
746 load_spreg( R_EAX, R_FPSCR );
747 store_reg( R_EAX, Rn );
752 uint32_t Rn = ((ir>>8)&0xF);
754 load_spreg( R_EAX, R_DBR );
755 store_reg( R_EAX, Rn );
756 sh4_x86.tstate = TSTATE_NONE;
765 switch( (ir&0xFF0) >> 4 ) {
768 if( sh4_x86.in_delay_slot ) {
771 load_spreg( R_ECX, R_PR );
772 store_spreg( R_ECX, R_NEW_PC );
773 sh4_x86.in_delay_slot = DELAY_PC;
774 sh4_translate_instruction(pc+2);
775 exit_block_newpcset(pc+2);
776 sh4_x86.branch_taken = TRUE;
784 call_func0( sh4_sleep );
785 sh4_x86.tstate = TSTATE_NONE;
786 sh4_x86.in_delay_slot = DELAY_NONE;
792 if( sh4_x86.in_delay_slot ) {
796 load_spreg( R_ECX, R_SPC );
797 store_spreg( R_ECX, R_NEW_PC );
798 load_spreg( R_EAX, R_SSR );
799 call_func1( sh4_write_sr, R_EAX );
800 sh4_x86.in_delay_slot = DELAY_PC;
801 sh4_x86.priv_checked = FALSE;
802 sh4_x86.fpuen_checked = FALSE;
803 sh4_x86.tstate = TSTATE_NONE;
804 sh4_translate_instruction(pc+2);
805 exit_block_newpcset(pc+2);
806 sh4_x86.branch_taken = TRUE;
817 { /* MOV.B @(R0, Rm), Rn */
818 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
819 load_reg( R_EAX, 0 );
820 load_reg( R_ECX, Rm );
821 ADD_r32_r32( R_ECX, R_EAX );
822 MMU_TRANSLATE_READ( R_EAX );
823 MEM_READ_BYTE( R_EAX, R_EAX );
824 store_reg( R_EAX, Rn );
825 sh4_x86.tstate = TSTATE_NONE;
829 { /* MOV.W @(R0, Rm), Rn */
830 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
831 load_reg( R_EAX, 0 );
832 load_reg( R_ECX, Rm );
833 ADD_r32_r32( R_ECX, R_EAX );
834 check_ralign16( R_EAX );
835 MMU_TRANSLATE_READ( R_EAX );
836 MEM_READ_WORD( R_EAX, R_EAX );
837 store_reg( R_EAX, Rn );
838 sh4_x86.tstate = TSTATE_NONE;
842 { /* MOV.L @(R0, Rm), Rn */
843 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
844 load_reg( R_EAX, 0 );
845 load_reg( R_ECX, Rm );
846 ADD_r32_r32( R_ECX, R_EAX );
847 check_ralign32( R_EAX );
848 MMU_TRANSLATE_READ( R_EAX );
849 MEM_READ_LONG( R_EAX, R_EAX );
850 store_reg( R_EAX, Rn );
851 sh4_x86.tstate = TSTATE_NONE;
855 { /* MAC.L @Rm+, @Rn+ */
856 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
858 load_reg( R_EAX, Rm );
859 check_ralign32( R_EAX );
860 MMU_TRANSLATE_READ( R_EAX );
861 PUSH_realigned_r32( R_EAX );
862 load_reg( R_EAX, Rn );
863 ADD_imm8s_r32( 4, R_EAX );
864 MMU_TRANSLATE_READ( R_EAX );
865 ADD_imm8s_sh4r( 8, REG_OFFSET(r[Rn]) );
866 // Note: we translate twice in case the operands cross a page boundary. It may be
867 // worth adding a page-boundary check to skip the second translation
869 load_reg( R_EAX, Rm );
870 check_ralign32( R_EAX );
871 MMU_TRANSLATE_READ( R_EAX );
872 PUSH_realigned_r32( R_EAX );
873 load_reg( R_EAX, Rn );
874 check_ralign32( R_EAX );
875 MMU_TRANSLATE_READ( R_EAX );
876 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
877 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
879 MEM_READ_LONG( R_EAX, R_EAX );
882 MEM_READ_LONG( R_ECX, R_EAX );
883 POP_realigned_r32( R_ECX );
886 ADD_r32_sh4r( R_EAX, R_MACL );
887 ADC_r32_sh4r( R_EDX, R_MACH );
889 load_spreg( R_ECX, R_S );
890 TEST_r32_r32(R_ECX, R_ECX);
891 JE_rel8( CALL_FUNC0_SIZE, nosat );
892 call_func0( signsat48 );
894 sh4_x86.tstate = TSTATE_NONE;
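/* MAC.L: the ADD/ADC pair accumulates the signed 64-bit product into
 * MACH:MACL; when SR.S is set, signsat48 saturates the accumulator to
 * 48 bits as the SH4 specifies, otherwise the full 64-bit result wraps. */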
903 { /* MOV.L Rm, @(disp, Rn) */
904 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<2;
905 load_reg( R_EAX, Rn );
906 ADD_imm32_r32( disp, R_EAX );
907 check_walign32( R_EAX );
908 MMU_TRANSLATE_WRITE( R_EAX );
909 load_reg( R_EDX, Rm );
910 MEM_WRITE_LONG( R_EAX, R_EDX );
911 sh4_x86.tstate = TSTATE_NONE;
917 { /* MOV.B Rm, @Rn */
918 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
919 load_reg( R_EAX, Rn );
920 MMU_TRANSLATE_WRITE( R_EAX );
921 load_reg( R_EDX, Rm );
922 MEM_WRITE_BYTE( R_EAX, R_EDX );
923 sh4_x86.tstate = TSTATE_NONE;
927 { /* MOV.W Rm, @Rn */
928 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
929 load_reg( R_EAX, Rn );
930 check_walign16( R_EAX );
931 MMU_TRANSLATE_WRITE( R_EAX );
932 load_reg( R_EDX, Rm );
933 MEM_WRITE_WORD( R_EAX, R_EDX );
934 sh4_x86.tstate = TSTATE_NONE;
938 { /* MOV.L Rm, @Rn */
939 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
940 load_reg( R_EAX, Rn );
941 check_walign32(R_EAX);
942 MMU_TRANSLATE_WRITE( R_EAX );
943 load_reg( R_EDX, Rm );
944 MEM_WRITE_LONG( R_EAX, R_EDX );
945 sh4_x86.tstate = TSTATE_NONE;
949 { /* MOV.B Rm, @-Rn */
950 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
951 load_reg( R_EAX, Rn );
952 ADD_imm8s_r32( -1, R_EAX );
953 MMU_TRANSLATE_WRITE( R_EAX );
954 load_reg( R_EDX, Rm );
955 ADD_imm8s_sh4r( -1, REG_OFFSET(r[Rn]) );
956 MEM_WRITE_BYTE( R_EAX, R_EDX );
957 sh4_x86.tstate = TSTATE_NONE;
961 { /* MOV.W Rm, @-Rn */
962 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
963 load_reg( R_EAX, Rn );
964 ADD_imm8s_r32( -2, R_EAX );
965 check_walign16( R_EAX );
966 MMU_TRANSLATE_WRITE( R_EAX );
967 load_reg( R_EDX, Rm );
968 ADD_imm8s_sh4r( -2, REG_OFFSET(r[Rn]) );
969 MEM_WRITE_WORD( R_EAX, R_EDX );
970 sh4_x86.tstate = TSTATE_NONE;
974 { /* MOV.L Rm, @-Rn */
975 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
976 load_reg( R_EAX, Rn );
977 ADD_imm8s_r32( -4, R_EAX );
978 check_walign32( R_EAX );
979 MMU_TRANSLATE_WRITE( R_EAX );
980 load_reg( R_EDX, Rm );
981 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
982 MEM_WRITE_LONG( R_EAX, R_EDX );
983 sh4_x86.tstate = TSTATE_NONE;
988 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
989 load_reg( R_EAX, Rm );
990 load_reg( R_ECX, Rn );
991 SHR_imm8_r32( 31, R_EAX );
992 SHR_imm8_r32( 31, R_ECX );
993 store_spreg( R_EAX, R_M );
994 store_spreg( R_ECX, R_Q );
995 CMP_r32_r32( R_EAX, R_ECX );
997 sh4_x86.tstate = TSTATE_NE;
1002 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1003 load_reg( R_EAX, Rm );
1004 load_reg( R_ECX, Rn );
1005 TEST_r32_r32( R_EAX, R_ECX );
1007 sh4_x86.tstate = TSTATE_E;
1012 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1013 load_reg( R_EAX, Rm );
1014 load_reg( R_ECX, Rn );
1015 AND_r32_r32( R_EAX, R_ECX );
1016 store_reg( R_ECX, Rn );
1017 sh4_x86.tstate = TSTATE_NONE;
1022 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1023 load_reg( R_EAX, Rm );
1024 load_reg( R_ECX, Rn );
1025 XOR_r32_r32( R_EAX, R_ECX );
1026 store_reg( R_ECX, Rn );
1027 sh4_x86.tstate = TSTATE_NONE;
1032 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1033 load_reg( R_EAX, Rm );
1034 load_reg( R_ECX, Rn );
1035 OR_r32_r32( R_EAX, R_ECX );
1036 store_reg( R_ECX, Rn );
1037 sh4_x86.tstate = TSTATE_NONE;
1041 { /* CMP/STR Rm, Rn */
1042 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1043 load_reg( R_EAX, Rm );
1044 load_reg( R_ECX, Rn );
1045 XOR_r32_r32( R_ECX, R_EAX );
1046 TEST_r8_r8( R_AL, R_AL );
1047 JE_rel8(13, target1);
1048 TEST_r8_r8( R_AH, R_AH ); // 2
1049 JE_rel8(9, target2);
1050 SHR_imm8_r32( 16, R_EAX ); // 3
1051 TEST_r8_r8( R_AL, R_AL ); // 2
1052 JE_rel8(2, target3);
1053 TEST_r8_r8( R_AH, R_AH ); // 2
1054 JMP_TARGET(target1);
1055 JMP_TARGET(target2);
1056 JMP_TARGET(target3);
1058 sh4_x86.tstate = TSTATE_E;
1062 { /* XTRCT Rm, Rn */
1063 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1064 load_reg( R_EAX, Rm );
1065 load_reg( R_ECX, Rn );
1066 SHL_imm8_r32( 16, R_EAX );
1067 SHR_imm8_r32( 16, R_ECX );
1068 OR_r32_r32( R_EAX, R_ECX );
1069 store_reg( R_ECX, Rn );
1070 sh4_x86.tstate = TSTATE_NONE;
1074 { /* MULU.W Rm, Rn */
1075 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1076 load_reg16u( R_EAX, Rm );
1077 load_reg16u( R_ECX, Rn );
1079 store_spreg( R_EAX, R_MACL );
1080 sh4_x86.tstate = TSTATE_NONE;
1084 { /* MULS.W Rm, Rn */
1085 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1086 load_reg16s( R_EAX, Rm );
1087 load_reg16s( R_ECX, Rn );
1089 store_spreg( R_EAX, R_MACL );
1090 sh4_x86.tstate = TSTATE_NONE;
1101 { /* CMP/EQ Rm, Rn */
1102 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1103 load_reg( R_EAX, Rm );
1104 load_reg( R_ECX, Rn );
1105 CMP_r32_r32( R_EAX, R_ECX );
1107 sh4_x86.tstate = TSTATE_E;
1111 { /* CMP/HS Rm, Rn */
1112 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1113 load_reg( R_EAX, Rm );
1114 load_reg( R_ECX, Rn );
1115 CMP_r32_r32( R_EAX, R_ECX );
1117 sh4_x86.tstate = TSTATE_AE;
1121 { /* CMP/GE Rm, Rn */
1122 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1123 load_reg( R_EAX, Rm );
1124 load_reg( R_ECX, Rn );
1125 CMP_r32_r32( R_EAX, R_ECX );
1127 sh4_x86.tstate = TSTATE_GE;
1132 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1133 load_spreg( R_ECX, R_M );
1134 load_reg( R_EAX, Rn );
1135 if( sh4_x86.tstate != TSTATE_C ) {
1139 SETC_r8( R_DL ); // Q'
1140 CMP_sh4r_r32( R_Q, R_ECX );
1141 JE_rel8(5, mqequal);
1142 ADD_sh4r_r32( REG_OFFSET(r[Rm]), R_EAX );
1144 JMP_TARGET(mqequal);
1145 SUB_sh4r_r32( REG_OFFSET(r[Rm]), R_EAX );
1147 store_reg( R_EAX, Rn ); // Done with Rn now
1148 SETC_r8(R_AL); // tmp1
1149 XOR_r8_r8( R_DL, R_AL ); // Q' = Q ^ tmp1
1150 XOR_r8_r8( R_AL, R_CL ); // Q'' = Q' ^ M
1151 store_spreg( R_ECX, R_Q );
1152 XOR_imm8s_r32( 1, R_AL ); // T = !Q'
1153 MOVZX_r8_r32( R_AL, R_EAX );
1154 store_spreg( R_EAX, R_T );
1155 sh4_x86.tstate = TSTATE_NONE;
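/* DIV1 is a single step of SH4 non-restoring division: Q and M track the
 * signs of the partial remainder and divisor, Rm is added or subtracted
 * above depending on whether Q==M, and the new Q and T are reconstructed
 * from the carry out via the SETC/XOR sequence. */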
1159 { /* DMULU.L Rm, Rn */
1160 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1161 load_reg( R_EAX, Rm );
1162 load_reg( R_ECX, Rn );
1164 store_spreg( R_EDX, R_MACH );
1165 store_spreg( R_EAX, R_MACL );
1166 sh4_x86.tstate = TSTATE_NONE;
1170 { /* CMP/HI Rm, Rn */
1171 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1172 load_reg( R_EAX, Rm );
1173 load_reg( R_ECX, Rn );
1174 CMP_r32_r32( R_EAX, R_ECX );
1176 sh4_x86.tstate = TSTATE_A;
1180 { /* CMP/GT Rm, Rn */
1181 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1182 load_reg( R_EAX, Rm );
1183 load_reg( R_ECX, Rn );
1184 CMP_r32_r32( R_EAX, R_ECX );
1186 sh4_x86.tstate = TSTATE_G;
1191 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1192 load_reg( R_EAX, Rm );
1193 load_reg( R_ECX, Rn );
1194 SUB_r32_r32( R_EAX, R_ECX );
1195 store_reg( R_ECX, Rn );
1196 sh4_x86.tstate = TSTATE_NONE;
1201 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1202 load_reg( R_EAX, Rm );
1203 load_reg( R_ECX, Rn );
1204 if( sh4_x86.tstate != TSTATE_C ) {
1207 SBB_r32_r32( R_EAX, R_ECX );
1208 store_reg( R_ECX, Rn );
1210 sh4_x86.tstate = TSTATE_C;
1215 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1216 load_reg( R_EAX, Rm );
1217 load_reg( R_ECX, Rn );
1218 SUB_r32_r32( R_EAX, R_ECX );
1219 store_reg( R_ECX, Rn );
1221 sh4_x86.tstate = TSTATE_O;
1226 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1227 load_reg( R_EAX, Rm );
1228 load_reg( R_ECX, Rn );
1229 ADD_r32_r32( R_EAX, R_ECX );
1230 store_reg( R_ECX, Rn );
1231 sh4_x86.tstate = TSTATE_NONE;
1235 { /* DMULS.L Rm, Rn */
1236 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1237 load_reg( R_EAX, Rm );
1238 load_reg( R_ECX, Rn );
1240 store_spreg( R_EDX, R_MACH );
1241 store_spreg( R_EAX, R_MACL );
1242 sh4_x86.tstate = TSTATE_NONE;
1247 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1248 if( sh4_x86.tstate != TSTATE_C ) {
1251 load_reg( R_EAX, Rm );
1252 load_reg( R_ECX, Rn );
1253 ADC_r32_r32( R_EAX, R_ECX );
1254 store_reg( R_ECX, Rn );
1256 sh4_x86.tstate = TSTATE_C;
1261 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1262 load_reg( R_EAX, Rm );
1263 load_reg( R_ECX, Rn );
1264 ADD_r32_r32( R_EAX, R_ECX );
1265 store_reg( R_ECX, Rn );
1267 sh4_x86.tstate = TSTATE_O;
1278 switch( (ir&0xF0) >> 4 ) {
1281 uint32_t Rn = ((ir>>8)&0xF);
1282 load_reg( R_EAX, Rn );
1285 store_reg( R_EAX, Rn );
1286 sh4_x86.tstate = TSTATE_C;
1291 uint32_t Rn = ((ir>>8)&0xF);
1292 load_reg( R_EAX, Rn );
1293 ADD_imm8s_r32( -1, R_EAX );
1294 store_reg( R_EAX, Rn );
1296 sh4_x86.tstate = TSTATE_E;
1301 uint32_t Rn = ((ir>>8)&0xF);
1302 load_reg( R_EAX, Rn );
1305 store_reg( R_EAX, Rn );
1306 sh4_x86.tstate = TSTATE_C;
1315 switch( (ir&0xF0) >> 4 ) {
1318 uint32_t Rn = ((ir>>8)&0xF);
1319 load_reg( R_EAX, Rn );
1322 store_reg( R_EAX, Rn );
1323 sh4_x86.tstate = TSTATE_C;
1328 uint32_t Rn = ((ir>>8)&0xF);
1329 load_reg( R_EAX, Rn );
1330 CMP_imm8s_r32( 0, R_EAX );
1332 sh4_x86.tstate = TSTATE_GE;
1337 uint32_t Rn = ((ir>>8)&0xF);
1338 load_reg( R_EAX, Rn );
1341 store_reg( R_EAX, Rn );
1342 sh4_x86.tstate = TSTATE_C;
1351 switch( (ir&0xF0) >> 4 ) {
1353 { /* STS.L MACH, @-Rn */
1354 uint32_t Rn = ((ir>>8)&0xF);
1355 load_reg( R_EAX, Rn );
1356 check_walign32( R_EAX );
1357 ADD_imm8s_r32( -4, R_EAX );
1358 MMU_TRANSLATE_WRITE( R_EAX );
1359 load_spreg( R_EDX, R_MACH );
1360 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
1361 MEM_WRITE_LONG( R_EAX, R_EDX );
1362 sh4_x86.tstate = TSTATE_NONE;
1366 { /* STS.L MACL, @-Rn */
1367 uint32_t Rn = ((ir>>8)&0xF);
1368 load_reg( R_EAX, Rn );
1369 check_walign32( R_EAX );
1370 ADD_imm8s_r32( -4, R_EAX );
1371 MMU_TRANSLATE_WRITE( R_EAX );
1372 load_spreg( R_EDX, R_MACL );
1373 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
1374 MEM_WRITE_LONG( R_EAX, R_EDX );
1375 sh4_x86.tstate = TSTATE_NONE;
1379 { /* STS.L PR, @-Rn */
1380 uint32_t Rn = ((ir>>8)&0xF);
1381 load_reg( R_EAX, Rn );
1382 check_walign32( R_EAX );
1383 ADD_imm8s_r32( -4, R_EAX );
1384 MMU_TRANSLATE_WRITE( R_EAX );
1385 load_spreg( R_EDX, R_PR );
1386 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
1387 MEM_WRITE_LONG( R_EAX, R_EDX );
1388 sh4_x86.tstate = TSTATE_NONE;
1392 { /* STC.L SGR, @-Rn */
1393 uint32_t Rn = ((ir>>8)&0xF);
1395 load_reg( R_EAX, Rn );
1396 check_walign32( R_EAX );
1397 ADD_imm8s_r32( -4, R_EAX );
1398 MMU_TRANSLATE_WRITE( R_EAX );
1399 load_spreg( R_EDX, R_SGR );
1400 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
1401 MEM_WRITE_LONG( R_EAX, R_EDX );
1402 sh4_x86.tstate = TSTATE_NONE;
1406 { /* STS.L FPUL, @-Rn */
1407 uint32_t Rn = ((ir>>8)&0xF);
1408 load_reg( R_EAX, Rn );
1409 check_walign32( R_EAX );
1410 ADD_imm8s_r32( -4, R_EAX );
1411 MMU_TRANSLATE_WRITE( R_EAX );
1412 load_spreg( R_EDX, R_FPUL );
1413 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
1414 MEM_WRITE_LONG( R_EAX, R_EDX );
1415 sh4_x86.tstate = TSTATE_NONE;
1419 { /* STS.L FPSCR, @-Rn */
1420 uint32_t Rn = ((ir>>8)&0xF);
1421 load_reg( R_EAX, Rn );
1422 check_walign32( R_EAX );
1423 ADD_imm8s_r32( -4, R_EAX );
1424 MMU_TRANSLATE_WRITE( R_EAX );
1425 load_spreg( R_EDX, R_FPSCR );
1426 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
1427 MEM_WRITE_LONG( R_EAX, R_EDX );
1428 sh4_x86.tstate = TSTATE_NONE;
1432 { /* STC.L DBR, @-Rn */
1433 uint32_t Rn = ((ir>>8)&0xF);
1435 load_reg( R_EAX, Rn );
1436 check_walign32( R_EAX );
1437 ADD_imm8s_r32( -4, R_EAX );
1438 MMU_TRANSLATE_WRITE( R_EAX );
1439 load_spreg( R_EDX, R_DBR );
1440 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
1441 MEM_WRITE_LONG( R_EAX, R_EDX );
1442 sh4_x86.tstate = TSTATE_NONE;
1451 switch( (ir&0x80) >> 7 ) {
1453 switch( (ir&0x70) >> 4 ) {
1455 { /* STC.L SR, @-Rn */
1456 uint32_t Rn = ((ir>>8)&0xF);
1458 load_reg( R_EAX, Rn );
1459 check_walign32( R_EAX );
1460 ADD_imm8s_r32( -4, R_EAX );
1461 MMU_TRANSLATE_WRITE( R_EAX );
1462 PUSH_realigned_r32( R_EAX );
1463 call_func0( sh4_read_sr );
1464 POP_realigned_r32( R_ECX );
1465 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
1466 MEM_WRITE_LONG( R_ECX, R_EAX );
1467 sh4_x86.tstate = TSTATE_NONE;
1471 { /* STC.L GBR, @-Rn */
1472 uint32_t Rn = ((ir>>8)&0xF);
1473 load_reg( R_EAX, Rn );
1474 check_walign32( R_EAX );
1475 ADD_imm8s_r32( -4, R_EAX );
1476 MMU_TRANSLATE_WRITE( R_EAX );
1477 load_spreg( R_EDX, R_GBR );
1478 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
1479 MEM_WRITE_LONG( R_EAX, R_EDX );
1480 sh4_x86.tstate = TSTATE_NONE;
1484 { /* STC.L VBR, @-Rn */
1485 uint32_t Rn = ((ir>>8)&0xF);
1487 load_reg( R_EAX, Rn );
1488 check_walign32( R_EAX );
1489 ADD_imm8s_r32( -4, R_EAX );
1490 MMU_TRANSLATE_WRITE( R_EAX );
1491 load_spreg( R_EDX, R_VBR );
1492 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
1493 MEM_WRITE_LONG( R_EAX, R_EDX );
1494 sh4_x86.tstate = TSTATE_NONE;
1498 { /* STC.L SSR, @-Rn */
1499 uint32_t Rn = ((ir>>8)&0xF);
1501 load_reg( R_EAX, Rn );
1502 check_walign32( R_EAX );
1503 ADD_imm8s_r32( -4, R_EAX );
1504 MMU_TRANSLATE_WRITE( R_EAX );
1505 load_spreg( R_EDX, R_SSR );
1506 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
1507 MEM_WRITE_LONG( R_EAX, R_EDX );
1508 sh4_x86.tstate = TSTATE_NONE;
1512 { /* STC.L SPC, @-Rn */
1513 uint32_t Rn = ((ir>>8)&0xF);
1515 load_reg( R_EAX, Rn );
1516 check_walign32( R_EAX );
1517 ADD_imm8s_r32( -4, R_EAX );
1518 MMU_TRANSLATE_WRITE( R_EAX );
1519 load_spreg( R_EDX, R_SPC );
1520 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
1521 MEM_WRITE_LONG( R_EAX, R_EDX );
1522 sh4_x86.tstate = TSTATE_NONE;
1531 { /* STC.L Rm_BANK, @-Rn */
1532 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm_BANK = ((ir>>4)&0x7);
1534 load_reg( R_EAX, Rn );
1535 check_walign32( R_EAX );
1536 ADD_imm8s_r32( -4, R_EAX );
1537 MMU_TRANSLATE_WRITE( R_EAX );
1538 load_spreg( R_EDX, REG_OFFSET(r_bank[Rm_BANK]) );
1539 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
1540 MEM_WRITE_LONG( R_EAX, R_EDX );
1541 sh4_x86.tstate = TSTATE_NONE;
1547 switch( (ir&0xF0) >> 4 ) {
1550 uint32_t Rn = ((ir>>8)&0xF);
1551 load_reg( R_EAX, Rn );
1553 store_reg( R_EAX, Rn );
1555 sh4_x86.tstate = TSTATE_C;
1560 uint32_t Rn = ((ir>>8)&0xF);
1561 load_reg( R_EAX, Rn );
1562 if( sh4_x86.tstate != TSTATE_C ) {
1566 store_reg( R_EAX, Rn );
1568 sh4_x86.tstate = TSTATE_C;
1577 switch( (ir&0xF0) >> 4 ) {
1580 uint32_t Rn = ((ir>>8)&0xF);
1581 load_reg( R_EAX, Rn );
1583 store_reg( R_EAX, Rn );
1585 sh4_x86.tstate = TSTATE_C;
1590 uint32_t Rn = ((ir>>8)&0xF);
1591 load_reg( R_EAX, Rn );
1592 CMP_imm8s_r32( 0, R_EAX );
1594 sh4_x86.tstate = TSTATE_G;
1599 uint32_t Rn = ((ir>>8)&0xF);
1600 load_reg( R_EAX, Rn );
1601 if( sh4_x86.tstate != TSTATE_C ) {
1605 store_reg( R_EAX, Rn );
1607 sh4_x86.tstate = TSTATE_C;
1616 switch( (ir&0xF0) >> 4 ) {
1618 { /* LDS.L @Rm+, MACH */
1619 uint32_t Rm = ((ir>>8)&0xF);
1620 load_reg( R_EAX, Rm );
1621 check_ralign32( R_EAX );
1622 MMU_TRANSLATE_READ( R_EAX );
1623 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
1624 MEM_READ_LONG( R_EAX, R_EAX );
1625 store_spreg( R_EAX, R_MACH );
1626 sh4_x86.tstate = TSTATE_NONE;
1630 { /* LDS.L @Rm+, MACL */
1631 uint32_t Rm = ((ir>>8)&0xF);
1632 load_reg( R_EAX, Rm );
1633 check_ralign32( R_EAX );
1634 MMU_TRANSLATE_READ( R_EAX );
1635 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
1636 MEM_READ_LONG( R_EAX, R_EAX );
1637 store_spreg( R_EAX, R_MACL );
1638 sh4_x86.tstate = TSTATE_NONE;
1642 { /* LDS.L @Rm+, PR */
1643 uint32_t Rm = ((ir>>8)&0xF);
1644 load_reg( R_EAX, Rm );
1645 check_ralign32( R_EAX );
1646 MMU_TRANSLATE_READ( R_EAX );
1647 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
1648 MEM_READ_LONG( R_EAX, R_EAX );
1649 store_spreg( R_EAX, R_PR );
1650 sh4_x86.tstate = TSTATE_NONE;
1654 { /* LDC.L @Rm+, SGR */
1655 uint32_t Rm = ((ir>>8)&0xF);
1657 load_reg( R_EAX, Rm );
1658 check_ralign32( R_EAX );
1659 MMU_TRANSLATE_READ( R_EAX );
1660 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
1661 MEM_READ_LONG( R_EAX, R_EAX );
1662 store_spreg( R_EAX, R_SGR );
1663 sh4_x86.tstate = TSTATE_NONE;
1667 { /* LDS.L @Rm+, FPUL */
1668 uint32_t Rm = ((ir>>8)&0xF);
1669 load_reg( R_EAX, Rm );
1670 check_ralign32( R_EAX );
1671 MMU_TRANSLATE_READ( R_EAX );
1672 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
1673 MEM_READ_LONG( R_EAX, R_EAX );
1674 store_spreg( R_EAX, R_FPUL );
1675 sh4_x86.tstate = TSTATE_NONE;
1679 { /* LDS.L @Rm+, FPSCR */
1680 uint32_t Rm = ((ir>>8)&0xF);
1681 load_reg( R_EAX, Rm );
1682 check_ralign32( R_EAX );
1683 MMU_TRANSLATE_READ( R_EAX );
1684 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
1685 MEM_READ_LONG( R_EAX, R_EAX );
1686 store_spreg( R_EAX, R_FPSCR );
1687 update_fr_bank( R_EAX );
1688 sh4_x86.tstate = TSTATE_NONE;
1692 { /* LDC.L @Rm+, DBR */
1693 uint32_t Rm = ((ir>>8)&0xF);
1695 load_reg( R_EAX, Rm );
1696 check_ralign32( R_EAX );
1697 MMU_TRANSLATE_READ( R_EAX );
1698 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
1699 MEM_READ_LONG( R_EAX, R_EAX );
1700 store_spreg( R_EAX, R_DBR );
1701 sh4_x86.tstate = TSTATE_NONE;
1710 switch( (ir&0x80) >> 7 ) {
1712 switch( (ir&0x70) >> 4 ) {
1714 { /* LDC.L @Rm+, SR */
1715 uint32_t Rm = ((ir>>8)&0xF);
1716 if( sh4_x86.in_delay_slot ) {
1720 load_reg( R_EAX, Rm );
1721 check_ralign32( R_EAX );
1722 MMU_TRANSLATE_READ( R_EAX );
1723 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
1724 MEM_READ_LONG( R_EAX, R_EAX );
1725 call_func1( sh4_write_sr, R_EAX );
1726 sh4_x86.priv_checked = FALSE;
1727 sh4_x86.fpuen_checked = FALSE;
1728 sh4_x86.tstate = TSTATE_NONE;
1733 { /* LDC.L @Rm+, GBR */
1734 uint32_t Rm = ((ir>>8)&0xF);
1735 load_reg( R_EAX, Rm );
1736 check_ralign32( R_EAX );
1737 MMU_TRANSLATE_READ( R_EAX );
1738 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
1739 MEM_READ_LONG( R_EAX, R_EAX );
1740 store_spreg( R_EAX, R_GBR );
1741 sh4_x86.tstate = TSTATE_NONE;
1745 { /* LDC.L @Rm+, VBR */
1746 uint32_t Rm = ((ir>>8)&0xF);
1748 load_reg( R_EAX, Rm );
1749 check_ralign32( R_EAX );
1750 MMU_TRANSLATE_READ( R_EAX );
1751 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
1752 MEM_READ_LONG( R_EAX, R_EAX );
1753 store_spreg( R_EAX, R_VBR );
1754 sh4_x86.tstate = TSTATE_NONE;
1758 { /* LDC.L @Rm+, SSR */
1759 uint32_t Rm = ((ir>>8)&0xF);
1761 load_reg( R_EAX, Rm );
1762 check_ralign32( R_EAX );
1763 MMU_TRANSLATE_READ( R_EAX );
1764 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
1765 MEM_READ_LONG( R_EAX, R_EAX );
1766 store_spreg( R_EAX, R_SSR );
1767 sh4_x86.tstate = TSTATE_NONE;
1771 { /* LDC.L @Rm+, SPC */
1772 uint32_t Rm = ((ir>>8)&0xF);
1774 load_reg( R_EAX, Rm );
1775 check_ralign32( R_EAX );
1776 MMU_TRANSLATE_READ( R_EAX );
1777 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
1778 MEM_READ_LONG( R_EAX, R_EAX );
1779 store_spreg( R_EAX, R_SPC );
1780 sh4_x86.tstate = TSTATE_NONE;
1789 { /* LDC.L @Rm+, Rn_BANK */
1790 uint32_t Rm = ((ir>>8)&0xF); uint32_t Rn_BANK = ((ir>>4)&0x7);
1792 load_reg( R_EAX, Rm );
1793 check_ralign32( R_EAX );
1794 MMU_TRANSLATE_READ( R_EAX );
1795 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
1796 MEM_READ_LONG( R_EAX, R_EAX );
1797 store_spreg( R_EAX, REG_OFFSET(r_bank[Rn_BANK]) );
1798 sh4_x86.tstate = TSTATE_NONE;
1804 switch( (ir&0xF0) >> 4 ) {
1807 uint32_t Rn = ((ir>>8)&0xF);
1808 load_reg( R_EAX, Rn );
1809 SHL_imm8_r32( 2, R_EAX );
1810 store_reg( R_EAX, Rn );
1811 sh4_x86.tstate = TSTATE_NONE;
1816 uint32_t Rn = ((ir>>8)&0xF);
1817 load_reg( R_EAX, Rn );
1818 SHL_imm8_r32( 8, R_EAX );
1819 store_reg( R_EAX, Rn );
1820 sh4_x86.tstate = TSTATE_NONE;
1825 uint32_t Rn = ((ir>>8)&0xF);
1826 load_reg( R_EAX, Rn );
1827 SHL_imm8_r32( 16, R_EAX );
1828 store_reg( R_EAX, Rn );
1829 sh4_x86.tstate = TSTATE_NONE;
1838 switch( (ir&0xF0) >> 4 ) {
1841 uint32_t Rn = ((ir>>8)&0xF);
1842 load_reg( R_EAX, Rn );
1843 SHR_imm8_r32( 2, R_EAX );
1844 store_reg( R_EAX, Rn );
1845 sh4_x86.tstate = TSTATE_NONE;
1850 uint32_t Rn = ((ir>>8)&0xF);
1851 load_reg( R_EAX, Rn );
1852 SHR_imm8_r32( 8, R_EAX );
1853 store_reg( R_EAX, Rn );
1854 sh4_x86.tstate = TSTATE_NONE;
1859 uint32_t Rn = ((ir>>8)&0xF);
1860 load_reg( R_EAX, Rn );
1861 SHR_imm8_r32( 16, R_EAX );
1862 store_reg( R_EAX, Rn );
1863 sh4_x86.tstate = TSTATE_NONE;
1872 switch( (ir&0xF0) >> 4 ) {
1874 { /* LDS Rm, MACH */
1875 uint32_t Rm = ((ir>>8)&0xF);
1876 load_reg( R_EAX, Rm );
1877 store_spreg( R_EAX, R_MACH );
1881 { /* LDS Rm, MACL */
1882 uint32_t Rm = ((ir>>8)&0xF);
1883 load_reg( R_EAX, Rm );
1884 store_spreg( R_EAX, R_MACL );
1889 uint32_t Rm = ((ir>>8)&0xF);
1890 load_reg( R_EAX, Rm );
1891 store_spreg( R_EAX, R_PR );
1896 uint32_t Rm = ((ir>>8)&0xF);
1898 load_reg( R_EAX, Rm );
1899 store_spreg( R_EAX, R_SGR );
1900 sh4_x86.tstate = TSTATE_NONE;
1904 { /* LDS Rm, FPUL */
1905 uint32_t Rm = ((ir>>8)&0xF);
1906 load_reg( R_EAX, Rm );
1907 store_spreg( R_EAX, R_FPUL );
1911 { /* LDS Rm, FPSCR */
1912 uint32_t Rm = ((ir>>8)&0xF);
1913 load_reg( R_EAX, Rm );
1914 store_spreg( R_EAX, R_FPSCR );
1915 update_fr_bank( R_EAX );
1916 sh4_x86.tstate = TSTATE_NONE;
1921 uint32_t Rm = ((ir>>8)&0xF);
1923 load_reg( R_EAX, Rm );
1924 store_spreg( R_EAX, R_DBR );
1925 sh4_x86.tstate = TSTATE_NONE;
1934 switch( (ir&0xF0) >> 4 ) {
1937 uint32_t Rn = ((ir>>8)&0xF);
1938 if( sh4_x86.in_delay_slot ) {
1941 load_spreg( R_EAX, R_PC );
1942 ADD_imm32_r32( pc + 4 - sh4_x86.block_start_pc, R_EAX );
1943 store_spreg( R_EAX, R_PR );
1944 load_reg( R_ECX, Rn );
1945 store_spreg( R_ECX, R_NEW_PC );
1946 sh4_translate_instruction(pc+2);
1947 exit_block_newpcset(pc+2);
1948 sh4_x86.branch_taken = TRUE;
1955 uint32_t Rn = ((ir>>8)&0xF);
1956 load_reg( R_EAX, Rn );
1957 MMU_TRANSLATE_WRITE( R_EAX );
1958 PUSH_realigned_r32( R_EAX );
1959 MEM_READ_BYTE( R_EAX, R_EAX );
1960 TEST_r8_r8( R_AL, R_AL );
1962 OR_imm8_r8( 0x80, R_AL );
1963 POP_realigned_r32( R_ECX );
1964 MEM_WRITE_BYTE( R_ECX, R_EAX );
1965 sh4_x86.tstate = TSTATE_NONE;
1970 uint32_t Rn = ((ir>>8)&0xF);
1971 if( sh4_x86.in_delay_slot ) {
1974 load_reg( R_ECX, Rn );
1975 store_spreg( R_ECX, R_NEW_PC );
1976 sh4_x86.in_delay_slot = DELAY_PC;
1977 sh4_translate_instruction(pc+2);
1978 exit_block_newpcset(pc+2);
1979 sh4_x86.branch_taken = TRUE;
1991 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1992 /* Annoyingly enough, not directly convertible */
1993 load_reg( R_EAX, Rn );
1994 load_reg( R_ECX, Rm );
1995 CMP_imm32_r32( 0, R_ECX );
1996 JGE_rel8(16, doshl);
1998 NEG_r32( R_ECX ); // 2
1999 AND_imm8_r8( 0x1F, R_CL ); // 3
2000 JE_rel8( 4, emptysar); // 2
2001 SAR_r32_CL( R_EAX ); // 2
2002 JMP_rel8(10, end); // 2
2004 JMP_TARGET(emptysar);
2005 SAR_imm8_r32(31, R_EAX ); // 3
2009 AND_imm8_r8( 0x1F, R_CL ); // 3
2010 SHL_r32_CL( R_EAX ); // 2
2013 store_reg( R_EAX, Rn );
2014 sh4_x86.tstate = TSTATE_NONE;
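/* SHAD takes a signed shift count in Rm (negative = arithmetic shift right),
 * while x86 variable shifts are unsigned and mask the count to 5 bits -- hence
 * the sign test, NEG, and the explicit empty-shift case above: a negative
 * count that masks to 0 is an SH4 shift right by 32, which must produce the
 * sign fill (SAR 31), not the no-op a masked x86 shift would give. */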
2019 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2020 load_reg( R_EAX, Rn );
2021 load_reg( R_ECX, Rm );
2022 CMP_imm32_r32( 0, R_ECX );
2023 JGE_rel8(15, doshl);
2025 NEG_r32( R_ECX ); // 2
2026 AND_imm8_r8( 0x1F, R_CL ); // 3
2027 JE_rel8( 4, emptyshr );
2028 SHR_r32_CL( R_EAX ); // 2
2029 JMP_rel8(9, end); // 2
2031 JMP_TARGET(emptyshr);
2032 XOR_r32_r32( R_EAX, R_EAX );
2036 AND_imm8_r8( 0x1F, R_CL ); // 3
2037 SHL_r32_CL( R_EAX ); // 2
2040 store_reg( R_EAX, Rn );
2041 sh4_x86.tstate = TSTATE_NONE;
2045 switch( (ir&0x80) >> 7 ) {
2047 switch( (ir&0x70) >> 4 ) {
2050 uint32_t Rm = ((ir>>8)&0xF);
2051 if( sh4_x86.in_delay_slot ) {
2055 load_reg( R_EAX, Rm );
2056 call_func1( sh4_write_sr, R_EAX );
2057 sh4_x86.priv_checked = FALSE;
2058 sh4_x86.fpuen_checked = FALSE;
2059 sh4_x86.tstate = TSTATE_NONE;
2065 uint32_t Rm = ((ir>>8)&0xF);
2066 load_reg( R_EAX, Rm );
2067 store_spreg( R_EAX, R_GBR );
2072 uint32_t Rm = ((ir>>8)&0xF);
2074 load_reg( R_EAX, Rm );
2075 store_spreg( R_EAX, R_VBR );
2076 sh4_x86.tstate = TSTATE_NONE;
2081 uint32_t Rm = ((ir>>8)&0xF);
2083 load_reg( R_EAX, Rm );
2084 store_spreg( R_EAX, R_SSR );
2085 sh4_x86.tstate = TSTATE_NONE;
2090 uint32_t Rm = ((ir>>8)&0xF);
2092 load_reg( R_EAX, Rm );
2093 store_spreg( R_EAX, R_SPC );
2094 sh4_x86.tstate = TSTATE_NONE;
2103 { /* LDC Rm, Rn_BANK */
2104 uint32_t Rm = ((ir>>8)&0xF); uint32_t Rn_BANK = ((ir>>4)&0x7);
2106 load_reg( R_EAX, Rm );
2107 store_spreg( R_EAX, REG_OFFSET(r_bank[Rn_BANK]) );
2108 sh4_x86.tstate = TSTATE_NONE;
2114 { /* MAC.W @Rm+, @Rn+ */
2115 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2117 load_reg( R_EAX, Rm );
2118 check_ralign16( R_EAX );
2119 MMU_TRANSLATE_READ( R_EAX );
2120 PUSH_realigned_r32( R_EAX );
2121 load_reg( R_EAX, Rn );
2122 ADD_imm8s_r32( 2, R_EAX );
2123 MMU_TRANSLATE_READ( R_EAX );
2124 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
2125 // Note: we translate twice in case the operands cross a page boundary. It may be
2126 // worth adding a page-boundary check to skip the second translation
2128 load_reg( R_EAX, Rm );
2129 check_ralign16( R_EAX );
2130 MMU_TRANSLATE_READ( R_EAX );
2131 PUSH_realigned_r32( R_EAX );
2132 load_reg( R_EAX, Rn );
2133 check_ralign16( R_EAX );
2134 MMU_TRANSLATE_READ( R_EAX );
2135 ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rn]) );
2136 ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
2138 MEM_READ_WORD( R_EAX, R_EAX );
2141 MEM_READ_WORD( R_ECX, R_EAX );
2142 POP_realigned_r32( R_ECX );
2145 load_spreg( R_ECX, R_S );
2146 TEST_r32_r32( R_ECX, R_ECX );
2147 JE_rel8( 47, nosat );
2149 ADD_r32_sh4r( R_EAX, R_MACL ); // 6
2150 JNO_rel8( 51, end ); // 2
2151 load_imm32( R_EDX, 1 ); // 5
2152 store_spreg( R_EDX, R_MACH ); // 6
2153 JS_rel8( 13, positive ); // 2
2154 load_imm32( R_EAX, 0x80000000 );// 5
2155 store_spreg( R_EAX, R_MACL ); // 6
2156 JMP_rel8( 25, end2 ); // 2
2158 JMP_TARGET(positive);
2159 load_imm32( R_EAX, 0x7FFFFFFF );// 5
2160 store_spreg( R_EAX, R_MACL ); // 6
2161 JMP_rel8( 12, end3); // 2
2164 ADD_r32_sh4r( R_EAX, R_MACL ); // 6
2165 ADC_r32_sh4r( R_EDX, R_MACH ); // 6
2169 sh4_x86.tstate = TSTATE_NONE;
2175 { /* MOV.L @(disp, Rm), Rn */
2176 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<2;
2177 load_reg( R_EAX, Rm );
2178 ADD_imm8s_r32( disp, R_EAX );
2179 check_ralign32( R_EAX );
2180 MMU_TRANSLATE_READ( R_EAX );
2181 MEM_READ_LONG( R_EAX, R_EAX );
2182 store_reg( R_EAX, Rn );
2183 sh4_x86.tstate = TSTATE_NONE;
2189 { /* MOV.B @Rm, Rn */
2190 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2191 load_reg( R_EAX, Rm );
2192 MMU_TRANSLATE_READ( R_EAX );
2193 MEM_READ_BYTE( R_EAX, R_EAX );
2194 store_reg( R_EAX, Rn );
2195 sh4_x86.tstate = TSTATE_NONE;
2199 { /* MOV.W @Rm, Rn */
2200 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2201 load_reg( R_EAX, Rm );
2202 check_ralign16( R_EAX );
2203 MMU_TRANSLATE_READ( R_EAX );
2204 MEM_READ_WORD( R_EAX, R_EAX );
2205 store_reg( R_EAX, Rn );
2206 sh4_x86.tstate = TSTATE_NONE;
2210 { /* MOV.L @Rm, Rn */
2211 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2212 load_reg( R_EAX, Rm );
2213 check_ralign32( R_EAX );
2214 MMU_TRANSLATE_READ( R_EAX );
2215 MEM_READ_LONG( R_EAX, R_EAX );
2216 store_reg( R_EAX, Rn );
2217 sh4_x86.tstate = TSTATE_NONE;
2222 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2223 load_reg( R_EAX, Rm );
2224 store_reg( R_EAX, Rn );
2228 { /* MOV.B @Rm+, Rn */
2229 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2230 load_reg( R_EAX, Rm );
2231 MMU_TRANSLATE_READ( R_EAX );
2232 ADD_imm8s_sh4r( 1, REG_OFFSET(r[Rm]) );
2233 MEM_READ_BYTE( R_EAX, R_EAX );
2234 store_reg( R_EAX, Rn );
2235 sh4_x86.tstate = TSTATE_NONE;
2239 { /* MOV.W @Rm+, Rn */
2240 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2241 load_reg( R_EAX, Rm );
2242 check_ralign16( R_EAX );
2243 MMU_TRANSLATE_READ( R_EAX );
2244 ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
2245 MEM_READ_WORD( R_EAX, R_EAX );
2246 store_reg( R_EAX, Rn );
2247 sh4_x86.tstate = TSTATE_NONE;
2251 { /* MOV.L @Rm+, Rn */
2252 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2253 load_reg( R_EAX, Rm );
2254 check_ralign32( R_EAX );
2255 MMU_TRANSLATE_READ( R_EAX );
2256 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
2257 MEM_READ_LONG( R_EAX, R_EAX );
2258 store_reg( R_EAX, Rn );
2259 sh4_x86.tstate = TSTATE_NONE;
2264 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2265 load_reg( R_EAX, Rm );
2267 store_reg( R_EAX, Rn );
2268 sh4_x86.tstate = TSTATE_NONE;
2272 { /* SWAP.B Rm, Rn */
2273 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2274 load_reg( R_EAX, Rm );
2275 XCHG_r8_r8( R_AL, R_AH );
2276 store_reg( R_EAX, Rn );
2280 { /* SWAP.W Rm, Rn */
2281 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2282 load_reg( R_EAX, Rm );
2283 MOV_r32_r32( R_EAX, R_ECX );
2284 SHL_imm8_r32( 16, R_ECX );
2285 SHR_imm8_r32( 16, R_EAX );
2286 OR_r32_r32( R_EAX, R_ECX );
2287 store_reg( R_ECX, Rn );
2288 sh4_x86.tstate = TSTATE_NONE;
2293 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2294 load_reg( R_EAX, Rm );
2295 XOR_r32_r32( R_ECX, R_ECX );
2297 SBB_r32_r32( R_EAX, R_ECX );
2298 store_reg( R_ECX, Rn );
2300 sh4_x86.tstate = TSTATE_C;
2305 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2306 load_reg( R_EAX, Rm );
2308 store_reg( R_EAX, Rn );
2309 sh4_x86.tstate = TSTATE_NONE;
2313 { /* EXTU.B Rm, Rn */
2314 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2315 load_reg( R_EAX, Rm );
2316 MOVZX_r8_r32( R_EAX, R_EAX );
2317 store_reg( R_EAX, Rn );
2321 { /* EXTU.W Rm, Rn */
2322 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2323 load_reg( R_EAX, Rm );
2324 MOVZX_r16_r32( R_EAX, R_EAX );
2325 store_reg( R_EAX, Rn );
2329 { /* EXTS.B Rm, Rn */
2330 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2331 load_reg( R_EAX, Rm );
2332 MOVSX_r8_r32( R_EAX, R_EAX );
2333 store_reg( R_EAX, Rn );
2337 { /* EXTS.W Rm, Rn */
2338 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2339 load_reg( R_EAX, Rm );
2340 MOVSX_r16_r32( R_EAX, R_EAX );
2341 store_reg( R_EAX, Rn );
2347 { /* ADD #imm, Rn */
2348 uint32_t Rn = ((ir>>8)&0xF); int32_t imm = SIGNEXT8(ir&0xFF);
2349 load_reg( R_EAX, Rn );
2350 ADD_imm8s_r32( imm, R_EAX );
2351 store_reg( R_EAX, Rn );
2352 sh4_x86.tstate = TSTATE_NONE;
2356 switch( (ir&0xF00) >> 8 ) {
2358 { /* MOV.B R0, @(disp, Rn) */
2359 uint32_t Rn = ((ir>>4)&0xF); uint32_t disp = (ir&0xF);
2360 load_reg( R_EAX, Rn );
2361 ADD_imm32_r32( disp, R_EAX );
2362 MMU_TRANSLATE_WRITE( R_EAX );
2363 load_reg( R_EDX, 0 );
2364 MEM_WRITE_BYTE( R_EAX, R_EDX );
2365 sh4_x86.tstate = TSTATE_NONE;
2369 { /* MOV.W R0, @(disp, Rn) */
2370 uint32_t Rn = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<1;
2371 load_reg( R_EAX, Rn );
2372 ADD_imm32_r32( disp, R_EAX );
2373 check_walign16( R_EAX );
2374 MMU_TRANSLATE_WRITE( R_EAX );
2375 load_reg( R_EDX, 0 );
2376 MEM_WRITE_WORD( R_EAX, R_EDX );
2377 sh4_x86.tstate = TSTATE_NONE;
2381 { /* MOV.B @(disp, Rm), R0 */
2382 uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF);
2383 load_reg( R_EAX, Rm );
2384 ADD_imm32_r32( disp, R_EAX );
2385 MMU_TRANSLATE_READ( R_EAX );
2386 MEM_READ_BYTE( R_EAX, R_EAX );
2387 store_reg( R_EAX, 0 );
2388 sh4_x86.tstate = TSTATE_NONE;
2392 { /* MOV.W @(disp, Rm), R0 */
2393 uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<1;
2394 load_reg( R_EAX, Rm );
2395 ADD_imm32_r32( disp, R_EAX );
2396 check_ralign16( R_EAX );
2397 MMU_TRANSLATE_READ( R_EAX );
2398 MEM_READ_WORD( R_EAX, R_EAX );
2399 store_reg( R_EAX, 0 );
2400 sh4_x86.tstate = TSTATE_NONE;
2404 { /* CMP/EQ #imm, R0 */
2405 int32_t imm = SIGNEXT8(ir&0xFF);
2406 load_reg( R_EAX, 0 );
2407 CMP_imm8s_r32(imm, R_EAX);
2409 sh4_x86.tstate = TSTATE_E;
2414 int32_t disp = SIGNEXT8(ir&0xFF)<<1;
2415 if( sh4_x86.in_delay_slot ) {
2418 sh4vma_t target = disp + pc + 4;
2419 JF_rel8( EXIT_BLOCK_REL_SIZE(target), nottaken );
2420 exit_block_rel(target, pc+2 );
2421 JMP_TARGET(nottaken);
2428 int32_t disp = SIGNEXT8(ir&0xFF)<<1;
2429 if( sh4_x86.in_delay_slot ) {
2432 sh4vma_t target = disp + pc + 4;
2433 JT_rel8( EXIT_BLOCK_REL_SIZE(target), nottaken );
2434 exit_block_rel(target, pc+2 );
2435 JMP_TARGET(nottaken);
2442 int32_t disp = SIGNEXT8(ir&0xFF)<<1;
2443 if( sh4_x86.in_delay_slot ) {
2446 sh4_x86.in_delay_slot = DELAY_PC;
2447 if( sh4_x86.tstate == TSTATE_NONE ) {
2448 CMP_imm8s_sh4r( 1, R_T );
2449 sh4_x86.tstate = TSTATE_E;
2451 OP(0x0F); OP(0x80+(sh4_x86.tstate^1)); uint32_t *patch = (uint32_t *)xlat_output; OP32(0); // Jcc rel32 over the taken path (jump if T clear)
2452 sh4_translate_instruction(pc+2);
2453 exit_block_rel( disp + pc + 4, pc+4 );
2455 *patch = (xlat_output - ((uint8_t *)patch)) - 4;
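/* Forward-branch patching: the Jcc rel32 above was emitted with a zero
 * placeholder displacement; once the fall-through target is reached, the
 * displacement is filled in relative to the end of the 4-byte field --
 * hence the -4 in the patch expression. */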
2456 sh4_translate_instruction(pc+2);
2463 int32_t disp = SIGNEXT8(ir&0xFF)<<1;
2464 if( sh4_x86.in_delay_slot ) {
2467 sh4vma_t target = disp + pc + 4;
2468 sh4_x86.in_delay_slot = DELAY_PC;
2469 if( sh4_x86.tstate == TSTATE_NONE ) {
2470 CMP_imm8s_sh4r( 1, R_T );
2471 sh4_x86.tstate = TSTATE_E;
2473 OP(0x0F); OP(0x80+sh4_x86.tstate); uint32_t *patch = (uint32_t *)xlat_output; OP32(0); // Jcc rel32 over the taken path (jump if T set)
2474 sh4_translate_instruction(pc+2);
2475 exit_block_rel( target, pc+4 );
2477 *patch = (xlat_output - ((uint8_t *)patch)) - 4;
2478 sh4_translate_instruction(pc+2);
2489 { /* MOV.W @(disp, PC), Rn */
2490 uint32_t Rn = ((ir>>8)&0xF); uint32_t disp = (ir&0xFF)<<1;
2491 if( sh4_x86.in_delay_slot ) {
2494 // See comments for MOV.L @(disp, PC), Rn
2495 uint32_t target = pc + disp + 4;
2496 if( IS_IN_ICACHE(target) ) {
2497 sh4ptr_t ptr = GET_ICACHE_PTR(target);
2498 MOV_moff32_EAX( ptr );
2499 MOVSX_r16_r32( R_EAX, R_EAX );
2501 load_imm32( R_EAX, (pc - sh4_x86.block_start_pc) + disp + 4 );
2502 ADD_sh4r_r32( R_PC, R_EAX );
2503 MMU_TRANSLATE_READ( R_EAX );
2504 MEM_READ_WORD( R_EAX, R_EAX );
2505 sh4_x86.tstate = TSTATE_NONE;
2507 store_reg( R_EAX, Rn );
2513 int32_t disp = SIGNEXT12(ir&0xFFF)<<1;
2514 if( sh4_x86.in_delay_slot ) {
2517 sh4_x86.in_delay_slot = DELAY_PC;
2518 sh4_translate_instruction( pc + 2 );
2519 exit_block_rel( disp + pc + 4, pc+4 );
2520 sh4_x86.branch_taken = TRUE;
2527 int32_t disp = SIGNEXT12(ir&0xFFF)<<1;
2528 if( sh4_x86.in_delay_slot ) {
2531 load_spreg( R_EAX, R_PC );
2532 ADD_imm32_r32( pc + 4 - sh4_x86.block_start_pc, R_EAX );
2533 store_spreg( R_EAX, R_PR );
2534 sh4_x86.in_delay_slot = DELAY_PC;
2535 sh4_translate_instruction( pc + 2 );
2536 exit_block_rel( disp + pc + 4, pc+4 );
2537 sh4_x86.branch_taken = TRUE;
2543 switch( (ir&0xF00) >> 8 ) {
2545 { /* MOV.B R0, @(disp, GBR) */
2546 uint32_t disp = (ir&0xFF);
2547 load_spreg( R_EAX, R_GBR );
2548 ADD_imm32_r32( disp, R_EAX );
2549 MMU_TRANSLATE_WRITE( R_EAX );
2550 load_reg( R_EDX, 0 );
2551 MEM_WRITE_BYTE( R_EAX, R_EDX );
2552 sh4_x86.tstate = TSTATE_NONE;
2556 { /* MOV.W R0, @(disp, GBR) */
2557 uint32_t disp = (ir&0xFF)<<1;
2558 load_spreg( R_EAX, R_GBR );
2559 ADD_imm32_r32( disp, R_EAX );
2560 check_walign16( R_EAX );
2561 MMU_TRANSLATE_WRITE( R_EAX );
2562 load_reg( R_EDX, 0 );
2563 MEM_WRITE_WORD( R_EAX, R_EDX );
2564 sh4_x86.tstate = TSTATE_NONE;
2568 { /* MOV.L R0, @(disp, GBR) */
2569 uint32_t disp = (ir&0xFF)<<2;
2570 load_spreg( R_EAX, R_GBR );
2571 ADD_imm32_r32( disp, R_EAX );
2572 check_walign32( R_EAX );
2573 MMU_TRANSLATE_WRITE( R_EAX );
2574 load_reg( R_EDX, 0 );
2575 MEM_WRITE_LONG( R_EAX, R_EDX );
2576 sh4_x86.tstate = TSTATE_NONE;
2581 uint32_t imm = (ir&0xFF);
2582 if( sh4_x86.in_delay_slot ) {
2585 load_imm32( R_ECX, pc+2 - sh4_x86.block_start_pc ); // 5
2586 ADD_r32_sh4r( R_ECX, R_PC );
2587 load_imm32( R_EAX, imm );
2588 call_func1( sh4_raise_trap, R_EAX );
2589 sh4_x86.tstate = TSTATE_NONE;
2590 exit_block_pcset(pc);
2591 sh4_x86.branch_taken = TRUE;
2597 { /* MOV.B @(disp, GBR), R0 */
2598 uint32_t disp = (ir&0xFF);
2599 load_spreg( R_EAX, R_GBR );
2600 ADD_imm32_r32( disp, R_EAX );
2601 MMU_TRANSLATE_READ( R_EAX );
2602 MEM_READ_BYTE( R_EAX, R_EAX );
2603 store_reg( R_EAX, 0 );
2604 sh4_x86.tstate = TSTATE_NONE;
2608 { /* MOV.W @(disp, GBR), R0 */
2609 uint32_t disp = (ir&0xFF)<<1;
2610 load_spreg( R_EAX, R_GBR );
2611 ADD_imm32_r32( disp, R_EAX );
2612 check_ralign16( R_EAX );
2613 MMU_TRANSLATE_READ( R_EAX );
2614 MEM_READ_WORD( R_EAX, R_EAX );
2615 store_reg( R_EAX, 0 );
2616 sh4_x86.tstate = TSTATE_NONE;
2620 { /* MOV.L @(disp, GBR), R0 */
2621 uint32_t disp = (ir&0xFF)<<2;
2622 load_spreg( R_EAX, R_GBR );
2623 ADD_imm32_r32( disp, R_EAX );
2624 check_ralign32( R_EAX );
2625 MMU_TRANSLATE_READ( R_EAX );
2626 MEM_READ_LONG( R_EAX, R_EAX );
2627 store_reg( R_EAX, 0 );
2628 sh4_x86.tstate = TSTATE_NONE;
2632 { /* MOVA @(disp, PC), R0 */
2633 uint32_t disp = (ir&0xFF)<<2;
2634 if( sh4_x86.in_delay_slot ) {
2637 load_imm32( R_ECX, (pc - sh4_x86.block_start_pc) + disp + 4 - (pc&0x03) );
2638 ADD_sh4r_r32( R_PC, R_ECX );
2639 store_reg( R_ECX, 0 );
2640 sh4_x86.tstate = TSTATE_NONE;
2645 { /* TST #imm, R0 */
2646 uint32_t imm = (ir&0xFF);
2647 load_reg( R_EAX, 0 );
2648 TEST_imm32_r32( imm, R_EAX );
2650 sh4_x86.tstate = TSTATE_E;
    case 0x9:
        { /* AND #imm, R0 */
            uint32_t imm = (ir&0xFF);
            load_reg( R_EAX, 0 );
            AND_imm32_r32(imm, R_EAX);
            store_reg( R_EAX, 0 );
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
    case 0xA:
        { /* XOR #imm, R0 */
            uint32_t imm = (ir&0xFF);
            load_reg( R_EAX, 0 );
            XOR_imm32_r32( imm, R_EAX );
            store_reg( R_EAX, 0 );
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
    case 0xB:
        { /* OR #imm, R0 */
            uint32_t imm = (ir&0xFF);
            load_reg( R_EAX, 0 );
            OR_imm32_r32(imm, R_EAX);
            store_reg( R_EAX, 0 );
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
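    /* The @(R0, GBR) bit operations: TST.B only reads, but AND.B, XOR.B
     * and OR.B are read-modify-write, so the effective address is
     * translated for write up front, preserved across the MEM_READ_BYTE
     * call (which may clobber registers), and reused for the write-back. */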
    case 0xC:
        { /* TST.B #imm, @(R0, GBR) */
            uint32_t imm = (ir&0xFF);
            load_reg( R_EAX, 0 );
            load_spreg( R_ECX, R_GBR );
            ADD_r32_r32( R_ECX, R_EAX );
            MMU_TRANSLATE_READ( R_EAX );
            MEM_READ_BYTE( R_EAX, R_EAX );
            TEST_imm8_r8( imm, R_AL );
            SETE_t();
            sh4_x86.tstate = TSTATE_E;
        }
        break;
    case 0xD:
        { /* AND.B #imm, @(R0, GBR) */
            uint32_t imm = (ir&0xFF);
            load_reg( R_EAX, 0 );
            load_spreg( R_ECX, R_GBR );
            ADD_r32_r32( R_ECX, R_EAX );
            MMU_TRANSLATE_WRITE( R_EAX );
            PUSH_realigned_r32(R_EAX);
            MEM_READ_BYTE( R_EAX, R_EAX );
            POP_realigned_r32(R_ECX);
            AND_imm32_r32(imm, R_EAX );
            MEM_WRITE_BYTE( R_ECX, R_EAX );
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
    case 0xE:
        { /* XOR.B #imm, @(R0, GBR) */
            uint32_t imm = (ir&0xFF);
            load_reg( R_EAX, 0 );
            load_spreg( R_ECX, R_GBR );
            ADD_r32_r32( R_ECX, R_EAX );
            MMU_TRANSLATE_WRITE( R_EAX );
            PUSH_realigned_r32(R_EAX);
            MEM_READ_BYTE(R_EAX, R_EAX);
            POP_realigned_r32(R_ECX);
            XOR_imm32_r32( imm, R_EAX );
            MEM_WRITE_BYTE( R_ECX, R_EAX );
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
    case 0xF:
        { /* OR.B #imm, @(R0, GBR) */
            uint32_t imm = (ir&0xFF);
            load_reg( R_EAX, 0 );
            load_spreg( R_ECX, R_GBR );
            ADD_r32_r32( R_ECX, R_EAX );
            MMU_TRANSLATE_WRITE( R_EAX );
            PUSH_realigned_r32(R_EAX);
            MEM_READ_BYTE( R_EAX, R_EAX );
            POP_realigned_r32(R_ECX);
            OR_imm32_r32(imm, R_EAX );
            MEM_WRITE_BYTE( R_ECX, R_EAX );
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
    }
    break;
case 0xD:
    { /* MOV.L @(disp, PC), Rn */
        uint32_t Rn = ((ir>>8)&0xF); uint32_t disp = (ir&0xFF)<<2;
        if( sh4_x86.in_delay_slot ) {
            SLOTILLEGAL();
        } else {
            uint32_t target = (pc & 0xFFFFFFFC) + disp + 4;
            if( IS_IN_ICACHE(target) ) {
                // If the target address is in the same page as the code, it's
                // pretty safe to just ref it directly and circumvent the whole
                // memory subsystem. (this is a big performance win)

                // FIXME: There's a corner-case that's not handled here when
                // the current code-page is in the ITLB but not in the UTLB.
                // (should generate a TLB miss although need to test SH4
                // behaviour to confirm) Unlikely to be anyone depending on this
                // behaviour though.
                sh4ptr_t ptr = GET_ICACHE_PTR(target);
                MOV_moff32_EAX( ptr );
            } else {
                // Note: we use sh4r.pc for the calc as we could be running at a
                // different virtual address than the translation was done with,
                // but we can safely assume that the low bits are the same.
                load_imm32( R_EAX, (pc-sh4_x86.block_start_pc) + disp + 4 - (pc&0x03) );
                ADD_sh4r_r32( R_PC, R_EAX );
                MMU_TRANSLATE_READ( R_EAX );
                MEM_READ_LONG( R_EAX, R_EAX );
                sh4_x86.tstate = TSTATE_NONE;
            }
            store_reg( R_EAX, Rn );
        }
    }
    break;
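/* Illustrative sketch only: when the literal is in the current ICACHE page
 * the fast path above degenerates to a single host-side load, roughly
 *     mov eax, [ptr]        ; ptr = host address of the SH4 literal
 * instead of the full MMU-translate + memory-subsystem read sequence. */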
case 0xE:
    { /* MOV #imm, Rn */
        uint32_t Rn = ((ir>>8)&0xF); int32_t imm = SIGNEXT8(ir&0xFF);
        load_imm32( R_EAX, imm );
        store_reg( R_EAX, Rn );
    }
    break;
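/* The FP arithmetic cases that follow share one pattern: test FPSCR.PR at
 * runtime to choose single vs double precision, push the operands onto the
 * x87 stack, apply the operation, and pop the result back into FRn. For
 * FADD the single-precision path is roughly (sketch, not exact encoding):
 *     fld   dword [edx + FRm_offset]
 *     fld   dword [edx + FRn_offset]
 *     faddp st(1), st
 *     fstp  dword [edx + FRn_offset]
 * where the offsets come from push_fr/pop_fr and edx holds the FR bank. */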
case 0xF:
    switch( ir&0xF ) {
    case 0x0:
        { /* FADD FRm, FRn */
            uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
            check_fpuen();
            load_spreg( R_ECX, R_FPSCR );
            TEST_imm32_r32( FPSCR_PR, R_ECX );
            load_fr_bank( R_EDX );
            JNE_rel8(13,doubleprec);
            push_fr(R_EDX, FRm);
            push_fr(R_EDX, FRn);
            FADDP_st(1);
            pop_fr(R_EDX, FRn);
            JMP_rel8(11,end);
            JMP_TARGET(doubleprec);
            push_dr(R_EDX, FRm);
            push_dr(R_EDX, FRn);
            FADDP_st(1);
            pop_dr(R_EDX, FRn);
            JMP_TARGET(end);
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
    case 0x1:
        { /* FSUB FRm, FRn */
            uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
            check_fpuen();
            load_spreg( R_ECX, R_FPSCR );
            TEST_imm32_r32( FPSCR_PR, R_ECX );
            load_fr_bank( R_EDX );
            JNE_rel8(13, doubleprec);
            push_fr(R_EDX, FRn);
            push_fr(R_EDX, FRm);
            FSUBP_st(1);
            pop_fr(R_EDX, FRn);
            JMP_rel8(11, end);
            JMP_TARGET(doubleprec);
            push_dr(R_EDX, FRn);
            push_dr(R_EDX, FRm);
            FSUBP_st(1);
            pop_dr(R_EDX, FRn);
            JMP_TARGET(end);
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
    case 0x2:
        { /* FMUL FRm, FRn */
            uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
            check_fpuen();
            load_spreg( R_ECX, R_FPSCR );
            TEST_imm32_r32( FPSCR_PR, R_ECX );
            load_fr_bank( R_EDX );
            JNE_rel8(13, doubleprec);
            push_fr(R_EDX, FRm);
            push_fr(R_EDX, FRn);
            FMULP_st(1);
            pop_fr(R_EDX, FRn);
            JMP_rel8(11, end);
            JMP_TARGET(doubleprec);
            push_dr(R_EDX, FRm);
            push_dr(R_EDX, FRn);
            FMULP_st(1);
            pop_dr(R_EDX, FRn);
            JMP_TARGET(end);
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
    case 0x3:
        { /* FDIV FRm, FRn */
            uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
            check_fpuen();
            load_spreg( R_ECX, R_FPSCR );
            TEST_imm32_r32( FPSCR_PR, R_ECX );
            load_fr_bank( R_EDX );
            JNE_rel8(13, doubleprec);
            push_fr(R_EDX, FRn);
            push_fr(R_EDX, FRm);
            FDIVP_st(1);
            pop_fr(R_EDX, FRn);
            JMP_rel8(11, end);
            JMP_TARGET(doubleprec);
            push_dr(R_EDX, FRn);
            push_dr(R_EDX, FRm);
            FDIVP_st(1);
            pop_dr(R_EDX, FRn);
            JMP_TARGET(end);
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
    case 0x4:
        { /* FCMP/EQ FRm, FRn */
            uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
            check_fpuen();
            load_spreg( R_ECX, R_FPSCR );
            TEST_imm32_r32( FPSCR_PR, R_ECX );
            load_fr_bank( R_EDX );
            JNE_rel8(8, doubleprec);
            push_fr(R_EDX, FRm);
            push_fr(R_EDX, FRn);
            JMP_rel8(6, end);
            JMP_TARGET(doubleprec);
            push_dr(R_EDX, FRm);
            push_dr(R_EDX, FRn);
            JMP_TARGET(end);
            FCOMIP_st(1);
            SETE_t();
            FPOP_st();
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
    case 0x5:
        { /* FCMP/GT FRm, FRn */
            uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
            check_fpuen();
            load_spreg( R_ECX, R_FPSCR );
            TEST_imm32_r32( FPSCR_PR, R_ECX );
            load_fr_bank( R_EDX );
            JNE_rel8(8, doubleprec);
            push_fr(R_EDX, FRm);
            push_fr(R_EDX, FRn);
            JMP_rel8(6, end);
            JMP_TARGET(doubleprec);
            push_dr(R_EDX, FRm);
            push_dr(R_EDX, FRn);
            JMP_TARGET(end);
            FCOMIP_st(1);
            SETA_t();
            FPOP_st();
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
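    /* FMOV with FPSCR.SZ set transfers a 64-bit pair: the even slot is
     * addressed as FRn&0x0E and the odd slot as FRn|0x01, and an odd
     * register number selects the XF bank (load_xf_bank) instead of the
     * current FR bank. The runtime SZ test branches over whichever
     * variant is not in effect. */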
    case 0x6:
        { /* FMOV @(R0, Rm), FRn */
            uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
            check_fpuen();
            load_reg( R_EAX, Rm );
            ADD_sh4r_r32( REG_OFFSET(r[0]), R_EAX );
            check_ralign32( R_EAX );
            MMU_TRANSLATE_READ( R_EAX );
            load_spreg( R_EDX, R_FPSCR );
            TEST_imm32_r32( FPSCR_SZ, R_EDX );
            JNE_rel8(8 + MEM_READ_SIZE, doublesize);
            MEM_READ_LONG( R_EAX, R_EAX );
            load_fr_bank( R_EDX );
            store_fr( R_EDX, R_EAX, FRn );
            if( FRn&1 ) {
                JMP_rel8(21 + MEM_READ_DOUBLE_SIZE, end);
                JMP_TARGET(doublesize);
                MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
                load_spreg( R_EDX, R_FPSCR ); // assume read_long clobbered it
                load_xf_bank( R_EDX );
                store_fr( R_EDX, R_ECX, FRn&0x0E );
                store_fr( R_EDX, R_EAX, FRn|0x01 );
                JMP_TARGET(end);
            } else {
                JMP_rel8(9 + MEM_READ_DOUBLE_SIZE, end);
                JMP_TARGET(doublesize);
                MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
                load_fr_bank( R_EDX );
                store_fr( R_EDX, R_ECX, FRn&0x0E );
                store_fr( R_EDX, R_EAX, FRn|0x01 );
                JMP_TARGET(end);
            }
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
    case 0x7:
        { /* FMOV FRm, @(R0, Rn) */
            uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
            check_fpuen();
            load_reg( R_EAX, Rn );
            ADD_sh4r_r32( REG_OFFSET(r[0]), R_EAX );
            check_walign32( R_EAX );
            MMU_TRANSLATE_WRITE( R_EAX );
            load_spreg( R_EDX, R_FPSCR );
            TEST_imm32_r32( FPSCR_SZ, R_EDX );
            JNE_rel8(8 + MEM_WRITE_SIZE, doublesize);
            load_fr_bank( R_EDX );
            load_fr( R_EDX, R_ECX, FRm );
            MEM_WRITE_LONG( R_EAX, R_ECX ); // 12
            if( FRm&1 ) {
                JMP_rel8( 18 + MEM_WRITE_DOUBLE_SIZE, end );
                JMP_TARGET(doublesize);
                load_xf_bank( R_EDX );
                load_fr( R_EDX, R_ECX, FRm&0x0E );
                load_fr( R_EDX, R_EDX, FRm|0x01 );
                MEM_WRITE_DOUBLE( R_EAX, R_ECX, R_EDX );
                JMP_TARGET(end);
            } else {
                JMP_rel8( 9 + MEM_WRITE_DOUBLE_SIZE, end );
                JMP_TARGET(doublesize);
                load_fr_bank( R_EDX );
                load_fr( R_EDX, R_ECX, FRm&0x0E );
                load_fr( R_EDX, R_EDX, FRm|0x01 );
                MEM_WRITE_DOUBLE( R_EAX, R_ECX, R_EDX );
                JMP_TARGET(end);
            }
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
    case 0x8:
        { /* FMOV @Rm, FRn */
            uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
            check_fpuen();
            load_reg( R_EAX, Rm );
            check_ralign32( R_EAX );
            MMU_TRANSLATE_READ( R_EAX );
            load_spreg( R_EDX, R_FPSCR );
            TEST_imm32_r32( FPSCR_SZ, R_EDX );
            JNE_rel8(8 + MEM_READ_SIZE, doublesize);
            MEM_READ_LONG( R_EAX, R_EAX );
            load_fr_bank( R_EDX );
            store_fr( R_EDX, R_EAX, FRn );
            if( FRn&1 ) {
                JMP_rel8(21 + MEM_READ_DOUBLE_SIZE, end);
                JMP_TARGET(doublesize);
                MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
                load_spreg( R_EDX, R_FPSCR ); // assume read_long clobbered it
                load_xf_bank( R_EDX );
                store_fr( R_EDX, R_ECX, FRn&0x0E );
                store_fr( R_EDX, R_EAX, FRn|0x01 );
                JMP_TARGET(end);
            } else {
                JMP_rel8(9 + MEM_READ_DOUBLE_SIZE, end);
                JMP_TARGET(doublesize);
                MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
                load_fr_bank( R_EDX );
                store_fr( R_EDX, R_ECX, FRn&0x0E );
                store_fr( R_EDX, R_EAX, FRn|0x01 );
                JMP_TARGET(end);
            }
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
    case 0x9:
        { /* FMOV @Rm+, FRn */
            uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
            check_fpuen();
            load_reg( R_EAX, Rm );
            check_ralign32( R_EAX );
            MMU_TRANSLATE_READ( R_EAX );
            load_spreg( R_EDX, R_FPSCR );
            TEST_imm32_r32( FPSCR_SZ, R_EDX );
            JNE_rel8(12 + MEM_READ_SIZE, doublesize);
            ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
            MEM_READ_LONG( R_EAX, R_EAX );
            load_fr_bank( R_EDX );
            store_fr( R_EDX, R_EAX, FRn );
            if( FRn&1 ) {
                JMP_rel8(25 + MEM_READ_DOUBLE_SIZE, end);
                JMP_TARGET(doublesize);
                ADD_imm8s_sh4r( 8, REG_OFFSET(r[Rm]) );
                MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
                load_spreg( R_EDX, R_FPSCR ); // assume read_long clobbered it
                load_xf_bank( R_EDX );
                store_fr( R_EDX, R_ECX, FRn&0x0E );
                store_fr( R_EDX, R_EAX, FRn|0x01 );
                JMP_TARGET(end);
            } else {
                JMP_rel8(13 + MEM_READ_DOUBLE_SIZE, end);
                JMP_TARGET(doublesize);
                ADD_imm8s_sh4r( 8, REG_OFFSET(r[Rm]) );
                MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
                load_fr_bank( R_EDX );
                store_fr( R_EDX, R_ECX, FRn&0x0E );
                store_fr( R_EDX, R_EAX, FRn|0x01 );
                JMP_TARGET(end);
            }
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
    case 0xA:
        { /* FMOV FRm, @Rn */
            uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
            check_fpuen();
            load_reg( R_EAX, Rn );
            check_walign32( R_EAX );
            MMU_TRANSLATE_WRITE( R_EAX );
            load_spreg( R_EDX, R_FPSCR );
            TEST_imm32_r32( FPSCR_SZ, R_EDX );
            JNE_rel8(8 + MEM_WRITE_SIZE, doublesize);
            load_fr_bank( R_EDX );
            load_fr( R_EDX, R_ECX, FRm );
            MEM_WRITE_LONG( R_EAX, R_ECX ); // 12
            if( FRm&1 ) {
                JMP_rel8( 18 + MEM_WRITE_DOUBLE_SIZE, end );
                JMP_TARGET(doublesize);
                load_xf_bank( R_EDX );
                load_fr( R_EDX, R_ECX, FRm&0x0E );
                load_fr( R_EDX, R_EDX, FRm|0x01 );
                MEM_WRITE_DOUBLE( R_EAX, R_ECX, R_EDX );
                JMP_TARGET(end);
            } else {
                JMP_rel8( 9 + MEM_WRITE_DOUBLE_SIZE, end );
                JMP_TARGET(doublesize);
                load_fr_bank( R_EDX );
                load_fr( R_EDX, R_ECX, FRm&0x0E );
                load_fr( R_EDX, R_EDX, FRm|0x01 );
                MEM_WRITE_DOUBLE( R_EAX, R_ECX, R_EDX );
                JMP_TARGET(end);
            }
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
    case 0xB:
        { /* FMOV FRm, @-Rn */
            uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
            check_fpuen();
            load_reg( R_EAX, Rn );
            check_walign32( R_EAX );
            load_spreg( R_EDX, R_FPSCR );
            TEST_imm32_r32( FPSCR_SZ, R_EDX );
            JNE_rel8(15 + MEM_WRITE_SIZE + MMU_TRANSLATE_SIZE, doublesize);
            ADD_imm8s_r32( -4, R_EAX );
            MMU_TRANSLATE_WRITE( R_EAX );
            load_fr_bank( R_EDX );
            load_fr( R_EDX, R_ECX, FRm );
            ADD_imm8s_sh4r(-4,REG_OFFSET(r[Rn]));
            MEM_WRITE_LONG( R_EAX, R_ECX ); // 12
            if( FRm&1 ) {
                JMP_rel8( 25 + MEM_WRITE_DOUBLE_SIZE + MMU_TRANSLATE_SIZE, end );
                JMP_TARGET(doublesize);
                ADD_imm8s_r32(-8,R_EAX);
                MMU_TRANSLATE_WRITE( R_EAX );
                load_xf_bank( R_EDX );
                load_fr( R_EDX, R_ECX, FRm&0x0E );
                load_fr( R_EDX, R_EDX, FRm|0x01 );
                ADD_imm8s_sh4r(-8,REG_OFFSET(r[Rn]));
                MEM_WRITE_DOUBLE( R_EAX, R_ECX, R_EDX );
                JMP_TARGET(end);
            } else {
                JMP_rel8( 16 + MEM_WRITE_DOUBLE_SIZE + MMU_TRANSLATE_SIZE, end );
                JMP_TARGET(doublesize);
                ADD_imm8s_r32(-8,R_EAX);
                MMU_TRANSLATE_WRITE( R_EAX );
                load_fr_bank( R_EDX );
                load_fr( R_EDX, R_ECX, FRm&0x0E );
                load_fr( R_EDX, R_EDX, FRm|0x01 );
                ADD_imm8s_sh4r(-8,REG_OFFSET(r[Rn]));
                MEM_WRITE_DOUBLE( R_EAX, R_ECX, R_EDX );
                JMP_TARGET(end);
            }
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
    case 0xC:
        { /* FMOV FRm, FRn */
            uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
            /* As horrible as this looks, it's actually covering 5 separate cases:
             * 1. 32-bit fr-to-fr (PR=0)
             * 2. 64-bit dr-to-dr (PR=1, FRm&1 == 0, FRn&1 == 0 )
             * 3. 64-bit dr-to-xd (PR=1, FRm&1 == 0, FRn&1 == 1 )
             * 4. 64-bit xd-to-dr (PR=1, FRm&1 == 1, FRn&1 == 0 )
             * 5. 64-bit xd-to-xd (PR=1, FRm&1 == 1, FRn&1 == 1 )
             */
            check_fpuen();
            load_spreg( R_ECX, R_FPSCR );
            load_fr_bank( R_EDX );
            TEST_imm32_r32( FPSCR_SZ, R_ECX );
            JNE_rel8(8, doublesize);
            load_fr( R_EDX, R_EAX, FRm ); // PR=0 branch
            store_fr( R_EDX, R_EAX, FRn );
            if( FRm&1 ) {
                JMP_rel8(24, end);
                JMP_TARGET(doublesize);
                load_xf_bank( R_ECX );
                load_fr( R_ECX, R_EAX, FRm-1 );
                if( FRn&1 ) {
                    load_fr( R_ECX, R_EDX, FRm );
                    store_fr( R_ECX, R_EAX, FRn-1 );
                    store_fr( R_ECX, R_EDX, FRn );
                } else /* FRn&1 == 0 */ {
                    load_fr( R_ECX, R_ECX, FRm );
                    store_fr( R_EDX, R_EAX, FRn );
                    store_fr( R_EDX, R_ECX, FRn+1 );
                }
            } else /* FRm&1 == 0 */ {
                if( FRn&1 ) {
                    JMP_rel8(24, end);
                    JMP_TARGET(doublesize);
                    load_xf_bank( R_ECX );
                    load_fr( R_EDX, R_EAX, FRm );
                    load_fr( R_EDX, R_EDX, FRm+1 );
                    store_fr( R_ECX, R_EAX, FRn-1 );
                    store_fr( R_ECX, R_EDX, FRn );
                } else /* FRn&1 == 0 */ {
                    JMP_rel8(12, end);
                    JMP_TARGET(doublesize);
                    load_fr( R_EDX, R_EAX, FRm );
                    load_fr( R_EDX, R_ECX, FRm+1 );
                    store_fr( R_EDX, R_EAX, FRn );
                    store_fr( R_EDX, R_ECX, FRn+1 );
                }
            }
            JMP_TARGET(end);
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
    case 0xD:
        switch( (ir&0xF0) >> 4 ) {
        case 0x0:
            { /* FSTS FPUL, FRn */
                uint32_t FRn = ((ir>>8)&0xF);
                check_fpuen();
                load_fr_bank( R_ECX );
                load_spreg( R_EAX, R_FPUL );
                store_fr( R_ECX, R_EAX, FRn );
                sh4_x86.tstate = TSTATE_NONE;
            }
            break;
        case 0x1:
            { /* FLDS FRm, FPUL */
                uint32_t FRm = ((ir>>8)&0xF);
                check_fpuen();
                load_fr_bank( R_ECX );
                load_fr( R_ECX, R_EAX, FRm );
                store_spreg( R_EAX, R_FPUL );
                sh4_x86.tstate = TSTATE_NONE;
            }
            break;
        case 0x2:
            { /* FLOAT FPUL, FRn */
                uint32_t FRn = ((ir>>8)&0xF);
                check_fpuen();
                load_spreg( R_ECX, R_FPSCR );
                load_spreg(R_EDX, REG_OFFSET(fr_bank));
                FILD_sh4r(R_FPUL);
                TEST_imm32_r32( FPSCR_PR, R_ECX );
                JNE_rel8(5, doubleprec);
                pop_fr( R_EDX, FRn );
                JMP_rel8(3, end);
                JMP_TARGET(doubleprec);
                pop_dr( R_EDX, FRn );
                JMP_TARGET(end);
                sh4_x86.tstate = TSTATE_NONE;
            }
            break;
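        /* FTRC: x87 FIST* rounds by the current control word while SH4
         * truncates toward zero, so the code saves the FPU control word,
         * loads trunc_fcw, converts, and restores it. The two compare/
         * branch pairs first clamp out-of-range inputs to max_int/min_int
         * (saturation, as this translator assumes SH4 behaves). */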
        case 0x3:
            { /* FTRC FRm, FPUL */
                uint32_t FRm = ((ir>>8)&0xF);
                check_fpuen();
                load_spreg( R_ECX, R_FPSCR );
                load_fr_bank( R_EDX );
                TEST_imm32_r32( FPSCR_PR, R_ECX );
                JNE_rel8(5, doubleprec);
                push_fr( R_EDX, FRm );
                JMP_rel8(3, doop);
                JMP_TARGET(doubleprec);
                push_dr( R_EDX, FRm );
                JMP_TARGET(doop);
                load_imm32( R_ECX, (uint32_t)&max_int );
                FILD_r32ind( R_ECX );
                FCOMIP_st(1);
                JNA_rel8( 32, sat );
                load_imm32( R_ECX, (uint32_t)&min_int ); // 5
                FILD_r32ind( R_ECX );           // 2
                FCOMIP_st(1);                   // 2
                JAE_rel8( 21, sat2 );           // 2
                load_imm32( R_EAX, (uint32_t)&save_fcw );
                FNSTCW_r32ind( R_EAX );
                load_imm32( R_EDX, (uint32_t)&trunc_fcw );
                FLDCW_r32ind( R_EDX );
                FISTP_sh4r(R_FPUL);             // 3
                FLDCW_r32ind( R_EAX );
                JMP_rel8( 9, end );             // 2
                JMP_TARGET(sat);
                JMP_TARGET(sat2);
                MOV_r32ind_r32( R_ECX, R_ECX ); // 2
                store_spreg( R_ECX, R_FPUL );
                FPOP_st();
                JMP_TARGET(end);
                sh4_x86.tstate = TSTATE_NONE;
            }
            break;
        case 0x4:
            { /* FNEG FRn */
                uint32_t FRn = ((ir>>8)&0xF);
                check_fpuen();
                load_spreg( R_ECX, R_FPSCR );
                TEST_imm32_r32( FPSCR_PR, R_ECX );
                load_fr_bank( R_EDX );
                JNE_rel8(10, doubleprec);
                push_fr(R_EDX, FRn);
                FCHS_st0();
                pop_fr(R_EDX, FRn);
                JMP_rel8(8, end);
                JMP_TARGET(doubleprec);
                push_dr(R_EDX, FRn);
                FCHS_st0();
                pop_dr(R_EDX, FRn);
                JMP_TARGET(end);
                sh4_x86.tstate = TSTATE_NONE;
            }
            break;
        case 0x5:
            { /* FABS FRn */
                uint32_t FRn = ((ir>>8)&0xF);
                check_fpuen();
                load_spreg( R_ECX, R_FPSCR );
                load_fr_bank( R_EDX );
                TEST_imm32_r32( FPSCR_PR, R_ECX );
                JNE_rel8(10, doubleprec);
                push_fr(R_EDX, FRn); // 3
                FABS_st0();          // 2
                pop_fr( R_EDX, FRn); // 3
                JMP_rel8(8,end);     // 2
                JMP_TARGET(doubleprec);
                push_dr(R_EDX, FRn);
                FABS_st0();
                pop_dr(R_EDX, FRn);
                JMP_TARGET(end);
                sh4_x86.tstate = TSTATE_NONE;
            }
            break;
        case 0x6:
            { /* FSQRT FRn */
                uint32_t FRn = ((ir>>8)&0xF);
                check_fpuen();
                load_spreg( R_ECX, R_FPSCR );
                TEST_imm32_r32( FPSCR_PR, R_ECX );
                load_fr_bank( R_EDX );
                JNE_rel8(10, doubleprec);
                push_fr(R_EDX, FRn);
                FSQRT_st0();
                pop_fr(R_EDX, FRn);
                JMP_rel8(8, end);
                JMP_TARGET(doubleprec);
                push_dr(R_EDX, FRn);
                FSQRT_st0();
                pop_dr(R_EDX, FRn);
                JMP_TARGET(end);
                sh4_x86.tstate = TSTATE_NONE;
            }
            break;
        case 0x7:
            { /* FSRRA FRn */
                uint32_t FRn = ((ir>>8)&0xF);
                check_fpuen();
                load_spreg( R_ECX, R_FPSCR );
                TEST_imm32_r32( FPSCR_PR, R_ECX );
                load_fr_bank( R_EDX );
                JNE_rel8(12, end); // PR=0 only
                FLD1_st0();
                push_fr(R_EDX, FRn);
                FSQRT_st0();
                FDIVP_st(1);
                pop_fr(R_EDX, FRn);
                JMP_TARGET(end);
                sh4_x86.tstate = TSTATE_NONE;
            }
            break;
        case 0x8:
            { /* FLDI0 FRn */
                uint32_t FRn = ((ir>>8)&0xF);
                /* IFF PR=0 */
                check_fpuen();
                load_spreg( R_ECX, R_FPSCR );
                TEST_imm32_r32( FPSCR_PR, R_ECX );
                JNE_rel8(8, end);
                XOR_r32_r32( R_EAX, R_EAX );
                load_spreg( R_ECX, REG_OFFSET(fr_bank) );
                store_fr( R_ECX, R_EAX, FRn );
                JMP_TARGET(end);
                sh4_x86.tstate = TSTATE_NONE;
            }
            break;
        case 0x9:
            { /* FLDI1 FRn */
                uint32_t FRn = ((ir>>8)&0xF);
                /* IFF PR=0 */
                check_fpuen();
                load_spreg( R_ECX, R_FPSCR );
                TEST_imm32_r32( FPSCR_PR, R_ECX );
                JNE_rel8(11, end);
                load_imm32(R_EAX, 0x3F800000);
                load_spreg( R_ECX, REG_OFFSET(fr_bank) );
                store_fr( R_ECX, R_EAX, FRn );
                JMP_TARGET(end);
                sh4_x86.tstate = TSTATE_NONE;
            }
            break;
        case 0xA:
            { /* FCNVSD FPUL, FRn */
                uint32_t FRn = ((ir>>8)&0xF);
                check_fpuen();
                load_spreg( R_ECX, R_FPSCR );
                TEST_imm32_r32( FPSCR_PR, R_ECX );
                JE_rel8(9, end); // only when PR=1
                load_fr_bank( R_ECX );
                push_fpul();
                pop_dr( R_ECX, FRn );
                JMP_TARGET(end);
                sh4_x86.tstate = TSTATE_NONE;
            }
            break;
        case 0xB:
            { /* FCNVDS FRm, FPUL */
                uint32_t FRm = ((ir>>8)&0xF);
                check_fpuen();
                load_spreg( R_ECX, R_FPSCR );
                TEST_imm32_r32( FPSCR_PR, R_ECX );
                JE_rel8(9, end); // only when PR=1
                load_fr_bank( R_ECX );
                push_dr( R_ECX, FRm );
                pop_fpul();
                JMP_TARGET(end);
                sh4_x86.tstate = TSTATE_NONE;
            }
            break;
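        /* FIPR forms the 4-component dot product FVn·FVm entirely on the
         * x87 stack, accumulating with FADDP and popping the sum into the
         * last element of FVn. It is only defined for PR=0, so the PR=1
         * test simply branches over the whole sequence. */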
        case 0xE:
            { /* FIPR FVm, FVn */
                uint32_t FVn = ((ir>>10)&0x3); uint32_t FVm = ((ir>>8)&0x3);
                check_fpuen();
                load_spreg( R_ECX, R_FPSCR );
                TEST_imm32_r32( FPSCR_PR, R_ECX );
                JNE_rel8(44, doubleprec);

                load_fr_bank( R_ECX );
                push_fr( R_ECX, FVm<<2 );
                push_fr( R_ECX, FVn<<2 );
                FMULP_st(1);
                push_fr( R_ECX, (FVm<<2)+1);
                push_fr( R_ECX, (FVn<<2)+1);
                FMULP_st(1);
                FADDP_st(1);
                push_fr( R_ECX, (FVm<<2)+2);
                push_fr( R_ECX, (FVn<<2)+2);
                FMULP_st(1);
                FADDP_st(1);
                push_fr( R_ECX, (FVm<<2)+3);
                push_fr( R_ECX, (FVn<<2)+3);
                FMULP_st(1);
                FADDP_st(1);
                pop_fr( R_ECX, (FVn<<2)+3);
                JMP_TARGET(doubleprec);
                sh4_x86.tstate = TSTATE_NONE;
            }
            break;
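        /* FSCA and FTRV call out to the C helpers sh4_fsca and sh4_ftrv
         * rather than emitting inline x87 code: the translated block only
         * computes pointers into the FR/XF banks and issues a call_func2. */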
        case 0xF:
            switch( (ir&0x100) >> 8 ) {
            case 0x0:
                { /* FSCA FPUL, FRn */
                    uint32_t FRn = ((ir>>9)&0x7)<<1;
                    check_fpuen();
                    load_spreg( R_ECX, R_FPSCR );
                    TEST_imm32_r32( FPSCR_PR, R_ECX );
                    JNE_rel8( CALL_FUNC2_SIZE + 9, doubleprec );
                    load_fr_bank( R_ECX );
                    ADD_imm8s_r32( (FRn&0x0E)<<2, R_ECX );
                    load_spreg( R_EDX, R_FPUL );
                    call_func2( sh4_fsca, R_EDX, R_ECX );
                    JMP_TARGET(doubleprec);
                    sh4_x86.tstate = TSTATE_NONE;
                }
                break;
            case 0x1:
                switch( (ir&0x200) >> 9 ) {
                case 0x0:
                    { /* FTRV XMTRX, FVn */
                        uint32_t FVn = ((ir>>10)&0x3);
                        check_fpuen();
                        load_spreg( R_ECX, R_FPSCR );
                        TEST_imm32_r32( FPSCR_PR, R_ECX );
                        JNE_rel8( 18 + CALL_FUNC2_SIZE, doubleprec );
                        load_fr_bank( R_EDX );                // 3
                        ADD_imm8s_r32( FVn<<4, R_EDX );       // 3
                        load_xf_bank( R_ECX );                // 12
                        call_func2( sh4_ftrv, R_EDX, R_ECX ); // 12
                        JMP_TARGET(doubleprec);
                        sh4_x86.tstate = TSTATE_NONE;
                    }
                    break;
                case 0x1:
                    switch( (ir&0xC00) >> 10 ) {
                    case 0x0:
                        { /* FSCHG */
                            check_fpuen();
                            load_spreg( R_ECX, R_FPSCR );
                            XOR_imm32_r32( FPSCR_SZ, R_ECX );
                            store_spreg( R_ECX, R_FPSCR );
                            sh4_x86.tstate = TSTATE_NONE;
                        }
                        break;
                    case 0x2:
                        { /* FRCHG */
                            check_fpuen();
                            load_spreg( R_ECX, R_FPSCR );
                            XOR_imm32_r32( FPSCR_FR, R_ECX );
                            store_spreg( R_ECX, R_FPSCR );
                            update_fr_bank( R_ECX );
                            sh4_x86.tstate = TSTATE_NONE;
                        }
                        break;
                    case 0x3:
                        { /* UNDEF */
                            if( sh4_x86.in_delay_slot ) {
                                SLOTILLEGAL();
                            } else {
                                JMP_exc(EXC_ILLEGAL);
                                return 2;
                            }
                        }
                        break;
                    default:
                        UNDEF();
                        break;
                    }
                    break;
                }
                break;
            }
            break;
        default:
            UNDEF();
            break;
        }
        break;
    case 0xE:
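        /* FMAC keeps the multiply-accumulate on the x87 stack: FR0*FRm via
         * FMULP, plus FRn via FADDP, popped back into FRn; the PR=1 path
         * emits the same shape with double-precision loads and stores. */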
        { /* FMAC FR0, FRm, FRn */
            uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
            check_fpuen();
            load_spreg( R_ECX, R_FPSCR );
            load_spreg( R_EDX, REG_OFFSET(fr_bank));
            TEST_imm32_r32( FPSCR_PR, R_ECX );
            JNE_rel8(18, doubleprec);
            push_fr( R_EDX, 0 );
            push_fr( R_EDX, FRm );
            FMULP_st(1);
            push_fr( R_EDX, FRn );
            FADDP_st(1);
            pop_fr( R_EDX, FRn );
            JMP_rel8(16, end);
            JMP_TARGET(doubleprec);
            push_dr( R_EDX, 0 );
            push_dr( R_EDX, FRm );
            FMULP_st(1);
            push_dr( R_EDX, FRn );
            FADDP_st(1);
            pop_dr( R_EDX, FRn );
            JMP_TARGET(end);
            sh4_x86.tstate = TSTATE_NONE;
        }
        break;
    default:
        UNDEF();
        break;
    }
    break;
}

sh4_x86.in_delay_slot = DELAY_NONE;
return 0;
}