2 * $Id: sh4x86.c,v 1.13 2007-09-19 11:30:30 nkeynes Exp $
4 * SH4 => x86 translation. This version does no real optimization, it just
5 * outputs straight-line x86 code - it mainly exists to provide a baseline
6 * to test the optimizing versions against.
8 * Copyright (c) 2007 Nathan Keynes.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
28 #include "sh4/sh4core.h"
29 #include "sh4/sh4trans.h"
30 #include "sh4/sh4mmio.h"
31 #include "sh4/x86op.h"
/* Initial allocation (in bytes) for the per-block back-patch list. */
34 #define DEFAULT_BACKPATCH_SIZE 4096
/**
37 * Struct to manage internal translation state. This state is not saved -
38 * it is only valid between calls to sh4_translate_begin_block() and
39 * sh4_translate_end_block()
 */
41 struct sh4_x86_state {
42 gboolean in_delay_slot; /* true while translating a delay-slot instruction */
43 gboolean priv_checked; /* true if we've already checked the cpu mode. */
44 gboolean fpuen_checked; /* true if we've already checked fpu enabled. */
47 /* Allocated memory for the (block-wide) back-patch list */
48 uint32_t **backpatch_list;
49 uint32_t backpatch_posn; /* number of entries currently recorded */
50 uint32_t backpatch_size; /* capacity of backpatch_list, in entries */
/* Exit-stub selectors: byte offsets into the exception jump block emitted by
 * sh4_translate_end_block(). Each entry there is a PUSH imm32 (5 bytes)
 * followed by a JMP rel8 (2 bytes), hence the stride of 7. */
53 #define EXIT_DATA_ADDR_READ 0
54 #define EXIT_DATA_ADDR_WRITE 7
55 #define EXIT_ILLEGAL 14
56 #define EXIT_SLOT_ILLEGAL 21
57 #define EXIT_FPU_DISABLED 28
58 #define EXIT_SLOT_FPU_DISABLED 35
/* Translator state for the block currently being translated. */
60 static struct sh4_x86_state sh4_x86;
/* INT32 clamp constants — presumably used as memory operands by emitted
 * float->int conversion code; their users are not visible in this chunk
 * (TODO confirm). */
62 static uint32_t max_int = 0x7FFFFFFF;
63 static uint32_t min_int = 0x80000000;
64 static uint32_t save_fcw; /* save value for fpu control word */
65 static uint32_t trunc_fcw = 0x0F7F; /* fcw value for truncation mode */
66 void signsat48( void )
68 if( ((int64_t)sh4r.mac) < (int64_t)0xFFFF800000000000LL )
69 sh4r.mac = 0xFFFF800000000000LL;
70 else if( ((int64_t)sh4r.mac) > (int64_t)0x00007FFFFFFFFFFFLL )
71 sh4r.mac = 0x00007FFFFFFFFFFFLL;
/**
 * FSCA helper: compute sine/cosine of a 16-bit fixed-point angle.
 * @param anglei  angle as a fraction of a full turn; only the low 16 bits
 *                are used (0x10000 == 2*PI radians).
 * @param fr      output float pair — NOTE(review): the lines that store the
 *                sin/cos results are not visible in this chunk.
 */
74 void sh4_fsca( uint32_t anglei, float *fr )
76 float angle = (((float)(anglei&0xFFFF))/65536.0) * 2 * M_PI;
/* Fragment of the sleep handler (function header not visible in this chunk):
 * if STBCR bit 7 (standby enable) is set, enter standby; otherwise enter
 * normal sleep. The else line between the two assignments is not visible. */
83 if( MMIO_READ( CPG, STBCR ) & 0x80 ) {
84 sh4r.sh4_state = SH4_STATE_STANDBY;
86 sh4r.sh4_state = SH4_STATE_SLEEP;
/**
 * Compute the matrix transform of fv given the matrix xf (target = xf * fv).
 * Both the vector and the matrix are stored word-swapped as per the sh4r.fr
 * banks (adjacent 32-bit singles exchanged), so the input is un-swapped into
 * fv[] first and the results are written back in the swapped order.
 * @param target  4-element vector, transformed in place (word-swapped layout)
 * @param xf      16-element matrix (word-swapped layout)
 */
void sh4_ftrv( float *target, float *xf )
{
    /* Gather the input vector in natural order */
    float fv[4] = { target[1], target[0], target[3], target[2] };
    float res[4];
    int row;
    for( row = 0; row < 4; row++ ) {
        res[row] = xf[row] * fv[0] + xf[row+4] * fv[1] +
            xf[row+8] * fv[2] + xf[row+12] * fv[3];
    }
    target[0] = res[0];
    target[1] = res[1];
    target[2] = res[2];
    target[3] = res[3];
}
/* Fragment of translator initialisation (function header not visible in this
 * chunk): allocate the initial back-patch list. The size field holds the
 * capacity in entries (bytes / sizeof(uint32_t *)).
 * NOTE(review): the malloc result is not checked here. */
111 sh4_x86.backpatch_list = malloc(DEFAULT_BACKPATCH_SIZE);
112 sh4_x86.backpatch_size = DEFAULT_BACKPATCH_SIZE / sizeof(uint32_t *);
116 static void sh4_x86_add_backpatch( uint8_t *ptr )
118 if( sh4_x86.backpatch_posn == sh4_x86.backpatch_size ) {
119 sh4_x86.backpatch_size <<= 1;
120 sh4_x86.backpatch_list = realloc( sh4_x86.backpatch_list, sh4_x86.backpatch_size * sizeof(uint32_t *) );
121 assert( sh4_x86.backpatch_list != NULL );
123 sh4_x86.backpatch_list[sh4_x86.backpatch_posn++] = (uint32_t *)ptr;
126 static void sh4_x86_do_backpatch( uint8_t *reloc_base )
129 for( i=0; i<sh4_x86.backpatch_posn; i++ ) {
130 *sh4_x86.backpatch_list[i] += (reloc_base - ((uint8_t *)sh4_x86.backpatch_list[i]) - 4);
/**
135 * Emit an instruction to load an SH4 reg into a real register
 * NOTE(review): the MOV opcode byte and function braces are not visible in
 * this chunk — only the ModR/M ([ebp+disp8]) and displacement bytes remain.
 */
137 static inline void load_reg( int x86reg, int sh4reg )
139 /* mov [bp+n], reg */
141 OP(0x45 + (x86reg<<3)); /* ModR/M: [ebp+disp8] -> x86reg */
142 OP(REG_OFFSET(r[sh4reg])); /* disp8 = offset of r[sh4reg] within sh4r */
/* 16-bit signed load of an SH4 GPR (opcode bytes not visible in this chunk) */
145 static inline void load_reg16s( int x86reg, int sh4reg )
149 MODRM_r32_sh4r(x86reg, REG_OFFSET(r[sh4reg]));
/* 16-bit unsigned load of an SH4 GPR (opcode bytes not visible in this chunk) */
152 static inline void load_reg16u( int x86reg, int sh4reg )
156 MODRM_r32_sh4r(x86reg, REG_OFFSET(r[sh4reg]));
/* Load/store a special (non-GPR) sh4r field by its byte offset */
160 #define load_spreg( x86reg, regoff ) MOV_sh4r_r32( regoff, x86reg )
161 #define store_spreg( x86reg, regoff ) MOV_r32_sh4r( x86reg, regoff )
/**
163 * Emit an instruction to load an immediate value into a register
 */
165 static inline void load_imm32( int x86reg, uint32_t value ) {
166 /* mov #value, reg */
/**
172 * Emit an instruction to store an SH4 reg (RN)
 */
174 void static inline store_reg( int x86reg, int sh4reg ) {
175 /* mov reg, [bp+n] */
177 OP(0x45 + (x86reg<<3));
178 OP(REG_OFFSET(r[sh4reg]));
/* Load the current FP bank pointer (sh4r.fr_bank) into bankreg */
181 #define load_fr_bank(bankreg) load_spreg( bankreg, REG_OFFSET(fr_bank))
/**
184 * Load an FR register (single-precision floating point) into an integer x86
185 * register (eg for register-to-register moves)
 */
187 void static inline load_fr( int bankreg, int x86reg, int frm )
/* frm^1 because the fr banks are stored word-swapped (adjacent singles
 * exchanged); <<2 scales the index to a byte offset */
189 OP(0x8B); OP(0x40+bankreg+(x86reg<<3)); OP((frm^1)<<2);
/**
193 * Store an FR register (single-precision floating point) into an integer x86
194 * register (eg for register-to-register moves)
 */
196 void static inline store_fr( int bankreg, int x86reg, int frn )
198 OP(0x89); OP(0x40+bankreg+(x86reg<<3)); OP((frn^1)<<2);
/**
203 * Load a pointer to the back fp back into the specified x86 register. The
204 * bankreg must have been previously loaded with FPSCR.
 * The FPSCR FR bit (bit 21) selects the bank: it is shifted down to yield
 * 0 or 0x40 (= 16 floats * 4 bytes), then added to &sh4r.fr via LEA.
 */
207 static inline void load_xf_bank( int bankreg )
210 SHR_imm8_r32( (21 - 6), bankreg ); // Extract bit 21 then *64 for bank size
211 AND_imm8s_r32( 0x40, bankreg ); // Complete extraction
212 OP(0x8D); OP(0x44+(bankreg<<3)); OP(0x28+bankreg); OP(REG_OFFSET(fr)); // LEA [ebp+bankreg+disp], bankreg
/**
216 * Update the fr_bank pointer based on the current fpscr value.
 * fpscrreg must hold FPSCR and is clobbered. Performs the same bit-21
 * extraction as load_xf_bank, then stores the computed bank address into
 * sh4r.fr_bank.
 */
218 static inline void update_fr_bank( int fpscrreg )
220 SHR_imm8_r32( (21 - 6), fpscrreg ); // Extract bit 21 then *64 for bank size
221 AND_imm8s_r32( 0x40, fpscrreg ); // Complete extraction
222 OP(0x8D); OP(0x44+(fpscrreg<<3)); OP(0x28+fpscrreg); OP(REG_OFFSET(fr)); // LEA [ebp+fpscrreg+disp], fpscrreg
223 store_spreg( fpscrreg, REG_OFFSET(fr_bank) );
/**
226 * Push FPUL (as a 32-bit float) onto the FPU stack
 */
228 static inline void push_fpul( )
230 OP(0xD9); OP(0x45); OP(R_FPUL); // FLD.S [ebp+R_FPUL]
/**
234 * Pop FPUL (as a 32-bit float) from the FPU stack
 */
236 static inline void pop_fpul( )
238 OP(0xD9); OP(0x5D); OP(R_FPUL); // FSTP.S [ebp+R_FPUL]
/**
242 * Push a 32-bit float onto the FPU stack, with bankreg previously loaded
243 * with the location of the current fp bank.
 * (frm^1 compensates for the word-swapped storage of the fr banks.)
 */
245 static inline void push_fr( int bankreg, int frm )
247 OP(0xD9); OP(0x40 + bankreg); OP((frm^1)<<2); // FLD.S [bankreg + frm^1*4]
/**
251 * Pop a 32-bit float from the FPU stack and store it back into the fp bank,
252 * with bankreg previously loaded with the location of the current fp bank.
 */
254 static inline void pop_fr( int bankreg, int frm )
256 OP(0xD9); OP(0x58 + bankreg); OP((frm^1)<<2); // FST.S [bankreg + frm^1*4]
/**
260 * Push a 64-bit double onto the FPU stack, with bankreg previously loaded
261 * with the location of the current fp bank.
 * (No ^1 swap here: doubles occupy an aligned pair of singles.)
 */
263 static inline void push_dr( int bankreg, int frm )
265 OP(0xDD); OP(0x40 + bankreg); OP(frm<<2); // FLD.D [bankreg + frm*4]
/* Pop a 64-bit double from the FPU stack back into the fp bank */
268 static inline void pop_dr( int bankreg, int frm )
270 OP(0xDD); OP(0x58 + bankreg); OP(frm<<2); // FST.D [bankreg + frm*4]
/**
 * Emit an indirect call to a zero-argument C function.
274 * Note: clobbers EAX to make the indirect call - this isn't usually
275 * a problem since the callee will usually clobber it anyway.
 */
277 static inline void call_func0( void *ptr )
279 load_imm32(R_EAX, (uint32_t)ptr);
/* Call a one-argument C function; the caller-cleanup ADD below pops the
 * argument. (The PUSH and CALL lines are not visible in this chunk.) */
283 static inline void call_func1( void *ptr, int arg1 )
287 ADD_imm8s_r32( 4, R_ESP );
/* Call a two-argument C function; 8 bytes of stacked arguments are popped
 * afterwards. (PUSH/CALL lines not visible in this chunk.) */
290 static inline void call_func2( void *ptr, int arg1, int arg2 )
295 ADD_imm8s_r32( 8, R_ESP );
/**
299 * Write a double (64-bit) value into memory, with the first word in arg2a, and
300 * the second in arg2b
 * Emits two sh4_write_long calls: the address is biased by +4 for the first
 * store and restored afterwards. (Several argument-PUSH lines are not
 * visible in this chunk.)
 */
303 static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
305 ADD_imm8s_r32( 4, addr );
308 ADD_imm8s_r32( -4, addr );
311 call_func0(sh4_write_long);
312 ADD_imm8s_r32( 8, R_ESP ); // caller cleanup of the 2 stacked args
313 call_func0(sh4_write_long);
314 ADD_imm8s_r32( 8, R_ESP );
/**
318 * Read a double (64-bit) value from memory, writing the first word into arg2a
319 * and the second into arg2b. The addr must not be in EAX
 */
322 static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
325 call_func0(sh4_read_long);
/* Second read at addr+4; the first result is preserved across the call
 * (the save/restore lines are not visible in this chunk). */
328 ADD_imm8s_r32( 4, addr );
330 call_func0(sh4_read_long);
331 ADD_imm8s_r32( 4, R_ESP );
332 MOV_r32_r32( R_EAX, arg2b );
336 /* Exception checks - Note that all exception checks will clobber EAX */
/**
 * Emit a privilege check: AND SR with SR_MD and branch to the (slot-)illegal
 * exit stub when the result is zero (user mode). Emitted at most once per
 * translated block, cached via priv_checked.
 */
337 static void check_priv( )
339 if( !sh4_x86.priv_checked ) {
340 sh4_x86.priv_checked = TRUE;
341 load_spreg( R_EAX, R_SR );
342 AND_imm32_r32( SR_MD, R_EAX );
343 if( sh4_x86.in_delay_slot ) {
344 JE_exit( EXIT_SLOT_ILLEGAL );
/* else branch (the `} else {` line is not visible in this chunk): */
346 JE_exit( EXIT_ILLEGAL );
/**
 * Emit an FPU-enabled check: AND SR with SR_FD and branch to the
 * FPU-disabled exit stub when the bit is set. Emitted at most once per
 * block, cached via fpuen_checked.
 */
351 static void check_fpuen( )
353 if( !sh4_x86.fpuen_checked ) {
354 sh4_x86.fpuen_checked = TRUE;
355 load_spreg( R_EAX, R_SR );
356 AND_imm32_r32( SR_FD, R_EAX );
357 if( sh4_x86.in_delay_slot ) {
358 JNE_exit(EXIT_SLOT_FPU_DISABLED);
360 JNE_exit(EXIT_FPU_DISABLED);
/* Emit a 16-bit read alignment check: branch to the data-address-error
 * (read) exit if bit 0 of the address register is set. Clobbers flags. */
365 static void check_ralign16( int x86reg )
367 TEST_imm32_r32( 0x00000001, x86reg );
368 JNE_exit(EXIT_DATA_ADDR_READ);
/* 16-bit write alignment check */
371 static void check_walign16( int x86reg )
373 TEST_imm32_r32( 0x00000001, x86reg );
374 JNE_exit(EXIT_DATA_ADDR_WRITE);
/* 32-bit read alignment check: both low bits must be clear */
377 static void check_ralign32( int x86reg )
379 TEST_imm32_r32( 0x00000003, x86reg );
380 JNE_exit(EXIT_DATA_ADDR_READ);
/* 32-bit write alignment check */
382 static void check_walign32( int x86reg )
384 TEST_imm32_r32( 0x00000003, x86reg );
385 JNE_exit(EXIT_DATA_ADDR_WRITE);
/* Copy a memory-read result out of EAX into the requested register
 * (no-op when the target is already EAX). */
389 #define MEM_RESULT(value_reg) if(value_reg != R_EAX) { MOV_r32_r32(R_EAX,value_reg); }
/* Memory access wrappers: each emits a call to the corresponding
 * sh4_read_xxx / sh4_write_xxx helper with the address (and, for writes,
 * the value) as argument(s). Reads land in EAX and are moved to value_reg. */
390 #define MEM_READ_BYTE( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); MEM_RESULT(value_reg)
391 #define MEM_READ_WORD( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); MEM_RESULT(value_reg)
392 #define MEM_READ_LONG( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); MEM_RESULT(value_reg)
393 #define MEM_WRITE_BYTE( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg)
394 #define MEM_WRITE_WORD( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg)
395 #define MEM_WRITE_LONG( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg)
/* Emit a jump to the slot-illegal exit stub and end translation of this
 * instruction (the `return 1` marks the end of the basic block). */
397 #define SLOTILLEGAL() JMP_exit(EXIT_SLOT_ILLEGAL); sh4_x86.in_delay_slot = FALSE; return 1;
/**
402 * Emit the 'start of block' assembly. Sets up the stack frame and save
 * (remainder of the comment and several prologue lines are not visible in
 * this chunk)
 */
405 void sh4_translate_begin_block()
/* EBP serves as the base pointer for all sh4r accesses within the block */
409 load_imm32( R_EBP, (uint32_t)&sh4r );
/* Clear ESI — at block exit it is added twice to pc, i.e. it accumulates
 * the count of 2-byte instructions executed (see sh4_translate_end_block) */
412 XOR_r32_r32(R_ESI, R_ESI);
/* Reset per-block translation state */
414 sh4_x86.in_delay_slot = FALSE;
415 sh4_x86.priv_checked = FALSE;
416 sh4_x86.fpuen_checked = FALSE;
417 sh4_x86.backpatch_posn = 0;
418 sh4_x86.exit_code = 1;
/**
422 * Exit the block early (ie branch out), conditionally or otherwise
 * (function header not visible in this chunk). EDI is stored as the new PC;
 * slice_cycle is advanced by sh4_cpu_period — presumably scaled by the
 * instruction count via a MUL that is not visible here (TODO confirm) —
 * and the block's exit code is left in EAX as the return value.
 */
426 store_spreg( R_EDI, REG_OFFSET(pc) );
427 MOV_moff32_EAX( (uint32_t)&sh4_cpu_period );
428 load_spreg( R_ECX, REG_OFFSET(slice_cycle) );
430 ADD_r32_r32( R_EAX, R_ECX );
431 store_spreg( R_ECX, REG_OFFSET(slice_cycle) );
432 load_imm32( R_EAX, sh4_x86.exit_code );
/**
440 * Flush any open regs back to memory, restore SI/DI/, update PC, etc
 */
442 void sh4_translate_end_block( sh4addr_t pc ) {
443 assert( !sh4_x86.in_delay_slot ); // should never stop here
444 // Normal termination - save PC, cycle count
447 if( sh4_x86.backpatch_posn != 0 ) {
448 uint8_t *end_ptr = xlat_output;
449 // Exception termination. Jump block for various exception codes:
/* Each PUSH imm32 (5 bytes) + JMP rel8 (2 bytes) entry is 7 bytes, matching
 * the EXIT_* byte offsets (0,7,14,21,28,35). All the rel8 jumps converge on
 * the common exception-raising code below, carrying the pushed EXC_* code
 * as the pending argument. */
450 PUSH_imm32( EXC_DATA_ADDR_READ );
451 JMP_rel8( 33, target1 );
452 PUSH_imm32( EXC_DATA_ADDR_WRITE );
453 JMP_rel8( 26, target2 );
454 PUSH_imm32( EXC_ILLEGAL );
455 JMP_rel8( 19, target3 );
456 PUSH_imm32( EXC_SLOT_ILLEGAL );
457 JMP_rel8( 12, target4 );
458 PUSH_imm32( EXC_FPU_DISABLED );
459 JMP_rel8( 5, target5 );
460 PUSH_imm32( EXC_SLOT_FPU_DISABLED );
// Advance sh4r.pc: ESI holds the executed-instruction count, added twice
// because SH4 instructions are 2 bytes each
467 load_spreg( R_ECX, REG_OFFSET(pc) );
468 ADD_r32_r32( R_ESI, R_ECX );
469 ADD_r32_r32( R_ESI, R_ECX );
470 store_spreg( R_ECX, REG_OFFSET(pc) );
// Accumulate elapsed time into slice_cycle
471 MOV_moff32_EAX( (uint32_t)&sh4_cpu_period );
472 load_spreg( R_ECX, REG_OFFSET(slice_cycle) );
474 ADD_r32_r32( R_EAX, R_ECX );
475 store_spreg( R_ECX, REG_OFFSET(slice_cycle) );
// Call sh4_raise_exception() with the EXC_* code pushed by the jump block
477 load_imm32( R_EAX, (uint32_t)sh4_raise_exception ); // 6
478 CALL_r32( R_EAX ); // 2
479 ADD_imm8s_r32( 4, R_ESP );
// Resolve the JE_exit/JNE_exit/JMP_exit branches recorded during
// translation so they land on the jump block emitted above
485 sh4_x86_do_backpatch( end_ptr );
491 extern uint16_t *sh4_icache;
492 extern uint32_t sh4_icache_addr;
495 * Translate a single instruction. Delayed branches are handled specially
496 * by translating both branch and delayed instruction as a single unit (as
499 * @return true if the instruction marks the end of a basic block
502 uint32_t sh4_x86_translate_instruction( uint32_t pc )
505 /* Read instruction */
506 uint32_t pageaddr = pc >> 12;
507 if( sh4_icache != NULL && pageaddr == sh4_icache_addr ) {
508 ir = sh4_icache[(pc&0xFFF)>>1];
510 sh4_icache = (uint16_t *)mem_get_page(pc);
511 if( ((uint32_t)sh4_icache) < MAX_IO_REGIONS ) {
512 /* If someone's actually been so daft as to try to execute out of an IO
513 * region, fallback on the full-blown memory read
516 ir = sh4_read_word(pc);
518 sh4_icache_addr = pageaddr;
519 ir = sh4_icache[(pc&0xFFF)>>1];
523 switch( (ir&0xF000) >> 12 ) {
527 switch( (ir&0x80) >> 7 ) {
529 switch( (ir&0x70) >> 4 ) {
532 uint32_t Rn = ((ir>>8)&0xF);
534 call_func0(sh4_read_sr);
535 store_reg( R_EAX, Rn );
540 uint32_t Rn = ((ir>>8)&0xF);
541 load_spreg( R_EAX, R_GBR );
542 store_reg( R_EAX, Rn );
547 uint32_t Rn = ((ir>>8)&0xF);
549 load_spreg( R_EAX, R_VBR );
550 store_reg( R_EAX, Rn );
555 uint32_t Rn = ((ir>>8)&0xF);
557 load_spreg( R_EAX, R_SSR );
558 store_reg( R_EAX, Rn );
563 uint32_t Rn = ((ir>>8)&0xF);
565 load_spreg( R_EAX, R_SPC );
566 store_reg( R_EAX, Rn );
575 { /* STC Rm_BANK, Rn */
576 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm_BANK = ((ir>>4)&0x7);
578 load_spreg( R_EAX, REG_OFFSET(r_bank[Rm_BANK]) );
579 store_reg( R_EAX, Rn );
585 switch( (ir&0xF0) >> 4 ) {
588 uint32_t Rn = ((ir>>8)&0xF);
589 if( sh4_x86.in_delay_slot ) {
592 load_imm32( R_EAX, pc + 4 );
593 store_spreg( R_EAX, R_PR );
594 load_reg( R_EDI, Rn );
595 ADD_r32_r32( R_EAX, R_EDI );
596 sh4_x86.in_delay_slot = TRUE;
603 uint32_t Rn = ((ir>>8)&0xF);
604 if( sh4_x86.in_delay_slot ) {
607 load_reg( R_EDI, Rn );
608 ADD_imm32_r32( pc + 4, R_EDI );
609 sh4_x86.in_delay_slot = TRUE;
616 uint32_t Rn = ((ir>>8)&0xF);
617 load_reg( R_EAX, Rn );
619 AND_imm32_r32( 0xFC000000, R_EAX );
620 CMP_imm32_r32( 0xE0000000, R_EAX );
622 call_func0( sh4_flush_store_queue );
624 ADD_imm8s_r32( 4, R_ESP );
629 uint32_t Rn = ((ir>>8)&0xF);
634 uint32_t Rn = ((ir>>8)&0xF);
639 uint32_t Rn = ((ir>>8)&0xF);
643 { /* MOVCA.L R0, @Rn */
644 uint32_t Rn = ((ir>>8)&0xF);
645 load_reg( R_EAX, 0 );
646 load_reg( R_ECX, Rn );
647 check_walign32( R_ECX );
648 MEM_WRITE_LONG( R_ECX, R_EAX );
657 { /* MOV.B Rm, @(R0, Rn) */
658 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
659 load_reg( R_EAX, 0 );
660 load_reg( R_ECX, Rn );
661 ADD_r32_r32( R_EAX, R_ECX );
662 load_reg( R_EAX, Rm );
663 MEM_WRITE_BYTE( R_ECX, R_EAX );
667 { /* MOV.W Rm, @(R0, Rn) */
668 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
669 load_reg( R_EAX, 0 );
670 load_reg( R_ECX, Rn );
671 ADD_r32_r32( R_EAX, R_ECX );
672 check_walign16( R_ECX );
673 load_reg( R_EAX, Rm );
674 MEM_WRITE_WORD( R_ECX, R_EAX );
678 { /* MOV.L Rm, @(R0, Rn) */
679 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
680 load_reg( R_EAX, 0 );
681 load_reg( R_ECX, Rn );
682 ADD_r32_r32( R_EAX, R_ECX );
683 check_walign32( R_ECX );
684 load_reg( R_EAX, Rm );
685 MEM_WRITE_LONG( R_ECX, R_EAX );
690 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
691 load_reg( R_EAX, Rm );
692 load_reg( R_ECX, Rn );
694 store_spreg( R_EAX, R_MACL );
698 switch( (ir&0xFF0) >> 4 ) {
713 XOR_r32_r32(R_EAX, R_EAX);
714 store_spreg( R_EAX, R_MACL );
715 store_spreg( R_EAX, R_MACH );
740 switch( (ir&0xF0) >> 4 ) {
743 /* Do nothing. Well, we could emit an 0x90, but what would really be the point? */
748 XOR_r32_r32( R_EAX, R_EAX );
749 store_spreg( R_EAX, R_Q );
750 store_spreg( R_EAX, R_M );
751 store_spreg( R_EAX, R_T );
756 uint32_t Rn = ((ir>>8)&0xF);
757 load_spreg( R_EAX, R_T );
758 store_reg( R_EAX, Rn );
767 switch( (ir&0xF0) >> 4 ) {
770 uint32_t Rn = ((ir>>8)&0xF);
771 load_spreg( R_EAX, R_MACH );
772 store_reg( R_EAX, Rn );
777 uint32_t Rn = ((ir>>8)&0xF);
778 load_spreg( R_EAX, R_MACL );
779 store_reg( R_EAX, Rn );
784 uint32_t Rn = ((ir>>8)&0xF);
785 load_spreg( R_EAX, R_PR );
786 store_reg( R_EAX, Rn );
791 uint32_t Rn = ((ir>>8)&0xF);
793 load_spreg( R_EAX, R_SGR );
794 store_reg( R_EAX, Rn );
799 uint32_t Rn = ((ir>>8)&0xF);
800 load_spreg( R_EAX, R_FPUL );
801 store_reg( R_EAX, Rn );
805 { /* STS FPSCR, Rn */
806 uint32_t Rn = ((ir>>8)&0xF);
807 load_spreg( R_EAX, R_FPSCR );
808 store_reg( R_EAX, Rn );
813 uint32_t Rn = ((ir>>8)&0xF);
815 load_spreg( R_EAX, R_DBR );
816 store_reg( R_EAX, Rn );
825 switch( (ir&0xFF0) >> 4 ) {
828 if( sh4_x86.in_delay_slot ) {
831 load_spreg( R_EDI, R_PR );
832 sh4_x86.in_delay_slot = TRUE;
840 call_func0( sh4_sleep );
841 sh4_x86.exit_code = 0;
842 sh4_x86.in_delay_slot = FALSE;
850 if( sh4_x86.in_delay_slot ) {
853 load_spreg( R_EDI, R_SPC );
854 load_spreg( R_EAX, R_SSR );
855 call_func1( sh4_write_sr, R_EAX );
856 sh4_x86.in_delay_slot = TRUE;
857 sh4_x86.priv_checked = FALSE;
858 sh4_x86.fpuen_checked = FALSE;
869 { /* MOV.B @(R0, Rm), Rn */
870 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
871 load_reg( R_EAX, 0 );
872 load_reg( R_ECX, Rm );
873 ADD_r32_r32( R_EAX, R_ECX );
874 MEM_READ_BYTE( R_ECX, R_EAX );
875 store_reg( R_EAX, Rn );
879 { /* MOV.W @(R0, Rm), Rn */
880 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
881 load_reg( R_EAX, 0 );
882 load_reg( R_ECX, Rm );
883 ADD_r32_r32( R_EAX, R_ECX );
884 check_ralign16( R_ECX );
885 MEM_READ_WORD( R_ECX, R_EAX );
886 store_reg( R_EAX, Rn );
890 { /* MOV.L @(R0, Rm), Rn */
891 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
892 load_reg( R_EAX, 0 );
893 load_reg( R_ECX, Rm );
894 ADD_r32_r32( R_EAX, R_ECX );
895 check_ralign32( R_ECX );
896 MEM_READ_LONG( R_ECX, R_EAX );
897 store_reg( R_EAX, Rn );
901 { /* MAC.L @Rm+, @Rn+ */
902 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
903 load_reg( R_ECX, Rm );
904 check_ralign32( R_ECX );
905 load_reg( R_ECX, Rn );
906 check_ralign32( R_ECX );
907 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
908 MEM_READ_LONG( R_ECX, R_EAX );
910 load_reg( R_ECX, Rm );
911 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
912 MEM_READ_LONG( R_ECX, R_EAX );
915 ADD_r32_sh4r( R_EAX, R_MACL );
916 ADC_r32_sh4r( R_EDX, R_MACH );
918 load_spreg( R_ECX, R_S );
919 TEST_r32_r32(R_ECX, R_ECX);
921 call_func0( signsat48 );
931 { /* MOV.L Rm, @(disp, Rn) */
932 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<2;
933 load_reg( R_ECX, Rn );
934 load_reg( R_EAX, Rm );
935 ADD_imm32_r32( disp, R_ECX );
936 check_walign32( R_ECX );
937 MEM_WRITE_LONG( R_ECX, R_EAX );
943 { /* MOV.B Rm, @Rn */
944 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
945 load_reg( R_EAX, Rm );
946 load_reg( R_ECX, Rn );
947 MEM_WRITE_BYTE( R_ECX, R_EAX );
951 { /* MOV.W Rm, @Rn */
952 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
953 load_reg( R_ECX, Rn );
954 check_walign16( R_ECX );
955 load_reg( R_EAX, Rm );
956 MEM_WRITE_WORD( R_ECX, R_EAX );
960 { /* MOV.L Rm, @Rn */
961 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
962 load_reg( R_EAX, Rm );
963 load_reg( R_ECX, Rn );
964 check_walign32(R_ECX);
965 MEM_WRITE_LONG( R_ECX, R_EAX );
969 { /* MOV.B Rm, @-Rn */
970 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
971 load_reg( R_EAX, Rm );
972 load_reg( R_ECX, Rn );
973 ADD_imm8s_r32( -1, R_ECX );
974 store_reg( R_ECX, Rn );
975 MEM_WRITE_BYTE( R_ECX, R_EAX );
979 { /* MOV.W Rm, @-Rn */
980 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
981 load_reg( R_ECX, Rn );
982 check_walign16( R_ECX );
983 load_reg( R_EAX, Rm );
984 ADD_imm8s_r32( -2, R_ECX );
985 store_reg( R_ECX, Rn );
986 MEM_WRITE_WORD( R_ECX, R_EAX );
990 { /* MOV.L Rm, @-Rn */
991 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
992 load_reg( R_EAX, Rm );
993 load_reg( R_ECX, Rn );
994 check_walign32( R_ECX );
995 ADD_imm8s_r32( -4, R_ECX );
996 store_reg( R_ECX, Rn );
997 MEM_WRITE_LONG( R_ECX, R_EAX );
1001 { /* DIV0S Rm, Rn */
1002 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1003 load_reg( R_EAX, Rm );
1004 load_reg( R_ECX, Rn );
1005 SHR_imm8_r32( 31, R_EAX );
1006 SHR_imm8_r32( 31, R_ECX );
1007 store_spreg( R_EAX, R_M );
1008 store_spreg( R_ECX, R_Q );
1009 CMP_r32_r32( R_EAX, R_ECX );
1015 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1016 load_reg( R_EAX, Rm );
1017 load_reg( R_ECX, Rn );
1018 TEST_r32_r32( R_EAX, R_ECX );
1024 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1025 load_reg( R_EAX, Rm );
1026 load_reg( R_ECX, Rn );
1027 AND_r32_r32( R_EAX, R_ECX );
1028 store_reg( R_ECX, Rn );
1033 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1034 load_reg( R_EAX, Rm );
1035 load_reg( R_ECX, Rn );
1036 XOR_r32_r32( R_EAX, R_ECX );
1037 store_reg( R_ECX, Rn );
1042 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1043 load_reg( R_EAX, Rm );
1044 load_reg( R_ECX, Rn );
1045 OR_r32_r32( R_EAX, R_ECX );
1046 store_reg( R_ECX, Rn );
1050 { /* CMP/STR Rm, Rn */
1051 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1052 load_reg( R_EAX, Rm );
1053 load_reg( R_ECX, Rn );
1054 XOR_r32_r32( R_ECX, R_EAX );
1055 TEST_r8_r8( R_AL, R_AL );
1056 JE_rel8(13, target1);
1057 TEST_r8_r8( R_AH, R_AH ); // 2
1058 JE_rel8(9, target2);
1059 SHR_imm8_r32( 16, R_EAX ); // 3
1060 TEST_r8_r8( R_AL, R_AL ); // 2
1061 JE_rel8(2, target3);
1062 TEST_r8_r8( R_AH, R_AH ); // 2
1063 JMP_TARGET(target1);
1064 JMP_TARGET(target2);
1065 JMP_TARGET(target3);
1070 { /* XTRCT Rm, Rn */
1071 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1072 load_reg( R_EAX, Rm );
1073 load_reg( R_ECX, Rn );
1074 SHL_imm8_r32( 16, R_EAX );
1075 SHR_imm8_r32( 16, R_ECX );
1076 OR_r32_r32( R_EAX, R_ECX );
1077 store_reg( R_ECX, Rn );
1081 { /* MULU.W Rm, Rn */
1082 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1083 load_reg16u( R_EAX, Rm );
1084 load_reg16u( R_ECX, Rn );
1086 store_spreg( R_EAX, R_MACL );
1090 { /* MULS.W Rm, Rn */
1091 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1092 load_reg16s( R_EAX, Rm );
1093 load_reg16s( R_ECX, Rn );
1095 store_spreg( R_EAX, R_MACL );
1106 { /* CMP/EQ Rm, Rn */
1107 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1108 load_reg( R_EAX, Rm );
1109 load_reg( R_ECX, Rn );
1110 CMP_r32_r32( R_EAX, R_ECX );
1115 { /* CMP/HS Rm, Rn */
1116 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1117 load_reg( R_EAX, Rm );
1118 load_reg( R_ECX, Rn );
1119 CMP_r32_r32( R_EAX, R_ECX );
1124 { /* CMP/GE Rm, Rn */
1125 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1126 load_reg( R_EAX, Rm );
1127 load_reg( R_ECX, Rn );
1128 CMP_r32_r32( R_EAX, R_ECX );
1134 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1135 load_spreg( R_ECX, R_M );
1136 load_reg( R_EAX, Rn );
1139 SETC_r8( R_DL ); // Q'
1140 CMP_sh4r_r32( R_Q, R_ECX );
1141 JE_rel8(5, mqequal);
1142 ADD_sh4r_r32( REG_OFFSET(r[Rm]), R_EAX );
1144 JMP_TARGET(mqequal);
1145 SUB_sh4r_r32( REG_OFFSET(r[Rm]), R_EAX );
1147 store_reg( R_EAX, Rn ); // Done with Rn now
1148 SETC_r8(R_AL); // tmp1
1149 XOR_r8_r8( R_DL, R_AL ); // Q' = Q ^ tmp1
1150 XOR_r8_r8( R_AL, R_CL ); // Q'' = Q' ^ M
1151 store_spreg( R_ECX, R_Q );
1152 XOR_imm8s_r32( 1, R_AL ); // T = !Q'
1153 MOVZX_r8_r32( R_AL, R_EAX );
1154 store_spreg( R_EAX, R_T );
1158 { /* DMULU.L Rm, Rn */
1159 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1160 load_reg( R_EAX, Rm );
1161 load_reg( R_ECX, Rn );
1163 store_spreg( R_EDX, R_MACH );
1164 store_spreg( R_EAX, R_MACL );
1168 { /* CMP/HI Rm, Rn */
1169 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1170 load_reg( R_EAX, Rm );
1171 load_reg( R_ECX, Rn );
1172 CMP_r32_r32( R_EAX, R_ECX );
1177 { /* CMP/GT Rm, Rn */
1178 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1179 load_reg( R_EAX, Rm );
1180 load_reg( R_ECX, Rn );
1181 CMP_r32_r32( R_EAX, R_ECX );
1187 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1188 load_reg( R_EAX, Rm );
1189 load_reg( R_ECX, Rn );
1190 SUB_r32_r32( R_EAX, R_ECX );
1191 store_reg( R_ECX, Rn );
1196 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1197 load_reg( R_EAX, Rm );
1198 load_reg( R_ECX, Rn );
1200 SBB_r32_r32( R_EAX, R_ECX );
1201 store_reg( R_ECX, Rn );
1207 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1208 load_reg( R_EAX, Rm );
1209 load_reg( R_ECX, Rn );
1210 SUB_r32_r32( R_EAX, R_ECX );
1211 store_reg( R_ECX, Rn );
1217 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1218 load_reg( R_EAX, Rm );
1219 load_reg( R_ECX, Rn );
1220 ADD_r32_r32( R_EAX, R_ECX );
1221 store_reg( R_ECX, Rn );
1225 { /* DMULS.L Rm, Rn */
1226 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1227 load_reg( R_EAX, Rm );
1228 load_reg( R_ECX, Rn );
1230 store_spreg( R_EDX, R_MACH );
1231 store_spreg( R_EAX, R_MACL );
1236 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1237 load_reg( R_EAX, Rm );
1238 load_reg( R_ECX, Rn );
1240 ADC_r32_r32( R_EAX, R_ECX );
1241 store_reg( R_ECX, Rn );
1247 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1248 load_reg( R_EAX, Rm );
1249 load_reg( R_ECX, Rn );
1250 ADD_r32_r32( R_EAX, R_ECX );
1251 store_reg( R_ECX, Rn );
1263 switch( (ir&0xF0) >> 4 ) {
1266 uint32_t Rn = ((ir>>8)&0xF);
1267 load_reg( R_EAX, Rn );
1270 store_reg( R_EAX, Rn );
1275 uint32_t Rn = ((ir>>8)&0xF);
1276 load_reg( R_EAX, Rn );
1277 ADD_imm8s_r32( -1, R_EAX );
1278 store_reg( R_EAX, Rn );
1284 uint32_t Rn = ((ir>>8)&0xF);
1285 load_reg( R_EAX, Rn );
1288 store_reg( R_EAX, Rn );
1297 switch( (ir&0xF0) >> 4 ) {
1300 uint32_t Rn = ((ir>>8)&0xF);
1301 load_reg( R_EAX, Rn );
1304 store_reg( R_EAX, Rn );
1309 uint32_t Rn = ((ir>>8)&0xF);
1310 load_reg( R_EAX, Rn );
1311 CMP_imm8s_r32( 0, R_EAX );
1317 uint32_t Rn = ((ir>>8)&0xF);
1318 load_reg( R_EAX, Rn );
1321 store_reg( R_EAX, Rn );
1330 switch( (ir&0xF0) >> 4 ) {
1332 { /* STS.L MACH, @-Rn */
1333 uint32_t Rn = ((ir>>8)&0xF);
1334 load_reg( R_ECX, Rn );
1335 check_walign32( R_ECX );
1336 ADD_imm8s_r32( -4, R_ECX );
1337 store_reg( R_ECX, Rn );
1338 load_spreg( R_EAX, R_MACH );
1339 MEM_WRITE_LONG( R_ECX, R_EAX );
1343 { /* STS.L MACL, @-Rn */
1344 uint32_t Rn = ((ir>>8)&0xF);
1345 load_reg( R_ECX, Rn );
1346 check_walign32( R_ECX );
1347 ADD_imm8s_r32( -4, R_ECX );
1348 store_reg( R_ECX, Rn );
1349 load_spreg( R_EAX, R_MACL );
1350 MEM_WRITE_LONG( R_ECX, R_EAX );
1354 { /* STS.L PR, @-Rn */
1355 uint32_t Rn = ((ir>>8)&0xF);
1356 load_reg( R_ECX, Rn );
1357 check_walign32( R_ECX );
1358 ADD_imm8s_r32( -4, R_ECX );
1359 store_reg( R_ECX, Rn );
1360 load_spreg( R_EAX, R_PR );
1361 MEM_WRITE_LONG( R_ECX, R_EAX );
1365 { /* STC.L SGR, @-Rn */
1366 uint32_t Rn = ((ir>>8)&0xF);
1368 load_reg( R_ECX, Rn );
1369 check_walign32( R_ECX );
1370 ADD_imm8s_r32( -4, R_ECX );
1371 store_reg( R_ECX, Rn );
1372 load_spreg( R_EAX, R_SGR );
1373 MEM_WRITE_LONG( R_ECX, R_EAX );
1377 { /* STS.L FPUL, @-Rn */
1378 uint32_t Rn = ((ir>>8)&0xF);
1379 load_reg( R_ECX, Rn );
1380 check_walign32( R_ECX );
1381 ADD_imm8s_r32( -4, R_ECX );
1382 store_reg( R_ECX, Rn );
1383 load_spreg( R_EAX, R_FPUL );
1384 MEM_WRITE_LONG( R_ECX, R_EAX );
1388 { /* STS.L FPSCR, @-Rn */
1389 uint32_t Rn = ((ir>>8)&0xF);
1390 load_reg( R_ECX, Rn );
1391 check_walign32( R_ECX );
1392 ADD_imm8s_r32( -4, R_ECX );
1393 store_reg( R_ECX, Rn );
1394 load_spreg( R_EAX, R_FPSCR );
1395 MEM_WRITE_LONG( R_ECX, R_EAX );
1399 { /* STC.L DBR, @-Rn */
1400 uint32_t Rn = ((ir>>8)&0xF);
1402 load_reg( R_ECX, Rn );
1403 check_walign32( R_ECX );
1404 ADD_imm8s_r32( -4, R_ECX );
1405 store_reg( R_ECX, Rn );
1406 load_spreg( R_EAX, R_DBR );
1407 MEM_WRITE_LONG( R_ECX, R_EAX );
1416 switch( (ir&0x80) >> 7 ) {
1418 switch( (ir&0x70) >> 4 ) {
1420 { /* STC.L SR, @-Rn */
1421 uint32_t Rn = ((ir>>8)&0xF);
1423 call_func0( sh4_read_sr );
1424 load_reg( R_ECX, Rn );
1425 check_walign32( R_ECX );
1426 ADD_imm8s_r32( -4, R_ECX );
1427 store_reg( R_ECX, Rn );
1428 MEM_WRITE_LONG( R_ECX, R_EAX );
1432 { /* STC.L GBR, @-Rn */
1433 uint32_t Rn = ((ir>>8)&0xF);
1434 load_reg( R_ECX, Rn );
1435 check_walign32( R_ECX );
1436 ADD_imm8s_r32( -4, R_ECX );
1437 store_reg( R_ECX, Rn );
1438 load_spreg( R_EAX, R_GBR );
1439 MEM_WRITE_LONG( R_ECX, R_EAX );
1443 { /* STC.L VBR, @-Rn */
1444 uint32_t Rn = ((ir>>8)&0xF);
1446 load_reg( R_ECX, Rn );
1447 check_walign32( R_ECX );
1448 ADD_imm8s_r32( -4, R_ECX );
1449 store_reg( R_ECX, Rn );
1450 load_spreg( R_EAX, R_VBR );
1451 MEM_WRITE_LONG( R_ECX, R_EAX );
1455 { /* STC.L SSR, @-Rn */
1456 uint32_t Rn = ((ir>>8)&0xF);
1458 load_reg( R_ECX, Rn );
1459 check_walign32( R_ECX );
1460 ADD_imm8s_r32( -4, R_ECX );
1461 store_reg( R_ECX, Rn );
1462 load_spreg( R_EAX, R_SSR );
1463 MEM_WRITE_LONG( R_ECX, R_EAX );
1467 { /* STC.L SPC, @-Rn */
1468 uint32_t Rn = ((ir>>8)&0xF);
1470 load_reg( R_ECX, Rn );
1471 check_walign32( R_ECX );
1472 ADD_imm8s_r32( -4, R_ECX );
1473 store_reg( R_ECX, Rn );
1474 load_spreg( R_EAX, R_SPC );
1475 MEM_WRITE_LONG( R_ECX, R_EAX );
1484 { /* STC.L Rm_BANK, @-Rn */
1485 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm_BANK = ((ir>>4)&0x7);
1487 load_reg( R_ECX, Rn );
1488 check_walign32( R_ECX );
1489 ADD_imm8s_r32( -4, R_ECX );
1490 store_reg( R_ECX, Rn );
1491 load_spreg( R_EAX, REG_OFFSET(r_bank[Rm_BANK]) );
1492 MEM_WRITE_LONG( R_ECX, R_EAX );
1498 switch( (ir&0xF0) >> 4 ) {
1501 uint32_t Rn = ((ir>>8)&0xF);
1502 load_reg( R_EAX, Rn );
1504 store_reg( R_EAX, Rn );
1510 uint32_t Rn = ((ir>>8)&0xF);
1511 load_reg( R_EAX, Rn );
1514 store_reg( R_EAX, Rn );
1524 switch( (ir&0xF0) >> 4 ) {
1527 uint32_t Rn = ((ir>>8)&0xF);
1528 load_reg( R_EAX, Rn );
1530 store_reg( R_EAX, Rn );
1536 uint32_t Rn = ((ir>>8)&0xF);
1537 load_reg( R_EAX, Rn );
1538 CMP_imm8s_r32( 0, R_EAX );
1544 uint32_t Rn = ((ir>>8)&0xF);
1545 load_reg( R_EAX, Rn );
1548 store_reg( R_EAX, Rn );
1558 switch( (ir&0xF0) >> 4 ) {
1560 { /* LDS.L @Rm+, MACH */
1561 uint32_t Rm = ((ir>>8)&0xF);
1562 load_reg( R_EAX, Rm );
1563 check_ralign32( R_EAX );
1564 MOV_r32_r32( R_EAX, R_ECX );
1565 ADD_imm8s_r32( 4, R_EAX );
1566 store_reg( R_EAX, Rm );
1567 MEM_READ_LONG( R_ECX, R_EAX );
1568 store_spreg( R_EAX, R_MACH );
1572 { /* LDS.L @Rm+, MACL */
1573 uint32_t Rm = ((ir>>8)&0xF);
1574 load_reg( R_EAX, Rm );
1575 check_ralign32( R_EAX );
1576 MOV_r32_r32( R_EAX, R_ECX );
1577 ADD_imm8s_r32( 4, R_EAX );
1578 store_reg( R_EAX, Rm );
1579 MEM_READ_LONG( R_ECX, R_EAX );
1580 store_spreg( R_EAX, R_MACL );
1584 { /* LDS.L @Rm+, PR */
1585 uint32_t Rm = ((ir>>8)&0xF);
1586 load_reg( R_EAX, Rm );
1587 check_ralign32( R_EAX );
1588 MOV_r32_r32( R_EAX, R_ECX );
1589 ADD_imm8s_r32( 4, R_EAX );
1590 store_reg( R_EAX, Rm );
1591 MEM_READ_LONG( R_ECX, R_EAX );
1592 store_spreg( R_EAX, R_PR );
1596 { /* LDC.L @Rm+, SGR */
1597 uint32_t Rm = ((ir>>8)&0xF);
1599 load_reg( R_EAX, Rm );
1600 check_ralign32( R_EAX );
1601 MOV_r32_r32( R_EAX, R_ECX );
1602 ADD_imm8s_r32( 4, R_EAX );
1603 store_reg( R_EAX, Rm );
1604 MEM_READ_LONG( R_ECX, R_EAX );
1605 store_spreg( R_EAX, R_SGR );
1609 { /* LDS.L @Rm+, FPUL */
1610 uint32_t Rm = ((ir>>8)&0xF);
1611 load_reg( R_EAX, Rm );
1612 check_ralign32( R_EAX );
1613 MOV_r32_r32( R_EAX, R_ECX );
1614 ADD_imm8s_r32( 4, R_EAX );
1615 store_reg( R_EAX, Rm );
1616 MEM_READ_LONG( R_ECX, R_EAX );
1617 store_spreg( R_EAX, R_FPUL );
1621 { /* LDS.L @Rm+, FPSCR */
1622 uint32_t Rm = ((ir>>8)&0xF);
1623 load_reg( R_EAX, Rm );
1624 check_ralign32( R_EAX );
1625 MOV_r32_r32( R_EAX, R_ECX );
1626 ADD_imm8s_r32( 4, R_EAX );
1627 store_reg( R_EAX, Rm );
1628 MEM_READ_LONG( R_ECX, R_EAX );
1629 store_spreg( R_EAX, R_FPSCR );
1630 update_fr_bank( R_EAX );
1634 { /* LDC.L @Rm+, DBR */
1635 uint32_t Rm = ((ir>>8)&0xF);
1637 load_reg( R_EAX, Rm );
1638 check_ralign32( R_EAX );
1639 MOV_r32_r32( R_EAX, R_ECX );
1640 ADD_imm8s_r32( 4, R_EAX );
1641 store_reg( R_EAX, Rm );
1642 MEM_READ_LONG( R_ECX, R_EAX );
1643 store_spreg( R_EAX, R_DBR );
1652 switch( (ir&0x80) >> 7 ) {
1654 switch( (ir&0x70) >> 4 ) {
1656 { /* LDC.L @Rm+, SR */
1657 uint32_t Rm = ((ir>>8)&0xF);
1658 if( sh4_x86.in_delay_slot ) {
1662 load_reg( R_EAX, Rm );
1663 check_ralign32( R_EAX );
1664 MOV_r32_r32( R_EAX, R_ECX );
1665 ADD_imm8s_r32( 4, R_EAX );
1666 store_reg( R_EAX, Rm );
1667 MEM_READ_LONG( R_ECX, R_EAX );
1668 call_func1( sh4_write_sr, R_EAX );
1669 sh4_x86.priv_checked = FALSE;
1670 sh4_x86.fpuen_checked = FALSE;
1675 { /* LDC.L @Rm+, GBR */
1676 uint32_t Rm = ((ir>>8)&0xF);
1677 load_reg( R_EAX, Rm );
1678 check_ralign32( R_EAX );
1679 MOV_r32_r32( R_EAX, R_ECX );
1680 ADD_imm8s_r32( 4, R_EAX );
1681 store_reg( R_EAX, Rm );
1682 MEM_READ_LONG( R_ECX, R_EAX );
1683 store_spreg( R_EAX, R_GBR );
1687 { /* LDC.L @Rm+, VBR */
1688 uint32_t Rm = ((ir>>8)&0xF);
1690 load_reg( R_EAX, Rm );
1691 check_ralign32( R_EAX );
1692 MOV_r32_r32( R_EAX, R_ECX );
1693 ADD_imm8s_r32( 4, R_EAX );
1694 store_reg( R_EAX, Rm );
1695 MEM_READ_LONG( R_ECX, R_EAX );
1696 store_spreg( R_EAX, R_VBR );
1700 { /* LDC.L @Rm+, SSR */
1701 uint32_t Rm = ((ir>>8)&0xF);
1703 load_reg( R_EAX, Rm );
1704 MOV_r32_r32( R_EAX, R_ECX );
1705 ADD_imm8s_r32( 4, R_EAX );
1706 store_reg( R_EAX, Rm );
1707 MEM_READ_LONG( R_ECX, R_EAX );
1708 store_spreg( R_EAX, R_SSR );
1712 { /* LDC.L @Rm+, SPC */
1713 uint32_t Rm = ((ir>>8)&0xF);
1715 load_reg( R_EAX, Rm );
1716 check_ralign32( R_EAX );
1717 MOV_r32_r32( R_EAX, R_ECX );
1718 ADD_imm8s_r32( 4, R_EAX );
1719 store_reg( R_EAX, Rm );
1720 MEM_READ_LONG( R_ECX, R_EAX );
1721 store_spreg( R_EAX, R_SPC );
1730 { /* LDC.L @Rm+, Rn_BANK */
1731 uint32_t Rm = ((ir>>8)&0xF); uint32_t Rn_BANK = ((ir>>4)&0x7);
1733 load_reg( R_EAX, Rm );
1734 check_ralign32( R_EAX );
1735 MOV_r32_r32( R_EAX, R_ECX );
1736 ADD_imm8s_r32( 4, R_EAX );
1737 store_reg( R_EAX, Rm );
1738 MEM_READ_LONG( R_ECX, R_EAX );
1739 store_spreg( R_EAX, REG_OFFSET(r_bank[Rn_BANK]) );
1745 switch( (ir&0xF0) >> 4 ) {
1748 uint32_t Rn = ((ir>>8)&0xF);
1749 load_reg( R_EAX, Rn );
1750 SHL_imm8_r32( 2, R_EAX );
1751 store_reg( R_EAX, Rn );
1756 uint32_t Rn = ((ir>>8)&0xF);
1757 load_reg( R_EAX, Rn );
1758 SHL_imm8_r32( 8, R_EAX );
1759 store_reg( R_EAX, Rn );
1764 uint32_t Rn = ((ir>>8)&0xF);
1765 load_reg( R_EAX, Rn );
1766 SHL_imm8_r32( 16, R_EAX );
1767 store_reg( R_EAX, Rn );
1776 switch( (ir&0xF0) >> 4 ) {
1779 uint32_t Rn = ((ir>>8)&0xF);
1780 load_reg( R_EAX, Rn );
1781 SHR_imm8_r32( 2, R_EAX );
1782 store_reg( R_EAX, Rn );
1787 uint32_t Rn = ((ir>>8)&0xF);
1788 load_reg( R_EAX, Rn );
1789 SHR_imm8_r32( 8, R_EAX );
1790 store_reg( R_EAX, Rn );
1795 uint32_t Rn = ((ir>>8)&0xF);
1796 load_reg( R_EAX, Rn );
1797 SHR_imm8_r32( 16, R_EAX );
1798 store_reg( R_EAX, Rn );
1807 switch( (ir&0xF0) >> 4 ) {
1809 { /* LDS Rm, MACH */
1810 uint32_t Rm = ((ir>>8)&0xF);
1811 load_reg( R_EAX, Rm );
1812 store_spreg( R_EAX, R_MACH );
1816 { /* LDS Rm, MACL */
1817 uint32_t Rm = ((ir>>8)&0xF);
1818 load_reg( R_EAX, Rm );
1819 store_spreg( R_EAX, R_MACL );
1824 uint32_t Rm = ((ir>>8)&0xF);
1825 load_reg( R_EAX, Rm );
1826 store_spreg( R_EAX, R_PR );
1831 uint32_t Rm = ((ir>>8)&0xF);
1833 load_reg( R_EAX, Rm );
1834 store_spreg( R_EAX, R_SGR );
1838 { /* LDS Rm, FPUL */
1839 uint32_t Rm = ((ir>>8)&0xF);
1840 load_reg( R_EAX, Rm );
1841 store_spreg( R_EAX, R_FPUL );
1845 { /* LDS Rm, FPSCR */
1846 uint32_t Rm = ((ir>>8)&0xF);
1847 load_reg( R_EAX, Rm );
1848 store_spreg( R_EAX, R_FPSCR );
1849 update_fr_bank( R_EAX );
1854 uint32_t Rm = ((ir>>8)&0xF);
1856 load_reg( R_EAX, Rm );
1857 store_spreg( R_EAX, R_DBR );
1866 switch( (ir&0xF0) >> 4 ) {
1869 uint32_t Rn = ((ir>>8)&0xF);
1870 if( sh4_x86.in_delay_slot ) {
1873 load_imm32( R_EAX, pc + 4 );
1874 store_spreg( R_EAX, R_PR );
1875 load_reg( R_EDI, Rn );
1876 sh4_x86.in_delay_slot = TRUE;
1883 uint32_t Rn = ((ir>>8)&0xF);
1884 load_reg( R_ECX, Rn );
1885 MEM_READ_BYTE( R_ECX, R_EAX );
1886 TEST_r8_r8( R_AL, R_AL );
1888 OR_imm8_r8( 0x80, R_AL );
1889 load_reg( R_ECX, Rn );
1890 MEM_WRITE_BYTE( R_ECX, R_EAX );
1895 uint32_t Rn = ((ir>>8)&0xF);
1896 if( sh4_x86.in_delay_slot ) {
1899 load_reg( R_EDI, Rn );
1900 sh4_x86.in_delay_slot = TRUE;
1912 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1913 /* Annoyingly enough, not directly convertible */
1914 load_reg( R_EAX, Rn );
1915 load_reg( R_ECX, Rm );
1916 CMP_imm32_r32( 0, R_ECX );
1917 JGE_rel8(16, doshl);
1919 NEG_r32( R_ECX ); // 2
1920 AND_imm8_r8( 0x1F, R_CL ); // 3
1921 JE_rel8( 4, emptysar); // 2
1922 SAR_r32_CL( R_EAX ); // 2
1923 JMP_rel8(10, end); // 2
1925 JMP_TARGET(emptysar);
1926 SAR_imm8_r32(31, R_EAX ); // 3
1930 AND_imm8_r8( 0x1F, R_CL ); // 3
1931 SHL_r32_CL( R_EAX ); // 2
1934 store_reg( R_EAX, Rn );
1939 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1940 load_reg( R_EAX, Rn );
1941 load_reg( R_ECX, Rm );
1942 CMP_imm32_r32( 0, R_ECX );
1943 JGE_rel8(15, doshl);
1945 NEG_r32( R_ECX ); // 2
1946 AND_imm8_r8( 0x1F, R_CL ); // 3
1947 JE_rel8( 4, emptyshr );
1948 SHR_r32_CL( R_EAX ); // 2
1949 JMP_rel8(9, end); // 2
1951 JMP_TARGET(emptyshr);
1952 XOR_r32_r32( R_EAX, R_EAX );
1956 AND_imm8_r8( 0x1F, R_CL ); // 3
1957 SHL_r32_CL( R_EAX ); // 2
1960 store_reg( R_EAX, Rn );
1964 switch( (ir&0x80) >> 7 ) {
1966 switch( (ir&0x70) >> 4 ) {
1969 uint32_t Rm = ((ir>>8)&0xF);
1970 if( sh4_x86.in_delay_slot ) {
1974 load_reg( R_EAX, Rm );
1975 call_func1( sh4_write_sr, R_EAX );
1976 sh4_x86.priv_checked = FALSE;
1977 sh4_x86.fpuen_checked = FALSE;
1983 uint32_t Rm = ((ir>>8)&0xF);
1984 load_reg( R_EAX, Rm );
1985 store_spreg( R_EAX, R_GBR );
1990 uint32_t Rm = ((ir>>8)&0xF);
1992 load_reg( R_EAX, Rm );
1993 store_spreg( R_EAX, R_VBR );
1998 uint32_t Rm = ((ir>>8)&0xF);
2000 load_reg( R_EAX, Rm );
2001 store_spreg( R_EAX, R_SSR );
2006 uint32_t Rm = ((ir>>8)&0xF);
2008 load_reg( R_EAX, Rm );
2009 store_spreg( R_EAX, R_SPC );
2018 { /* LDC Rm, Rn_BANK */
2019 uint32_t Rm = ((ir>>8)&0xF); uint32_t Rn_BANK = ((ir>>4)&0x7);
2021 load_reg( R_EAX, Rm );
2022 store_spreg( R_EAX, REG_OFFSET(r_bank[Rn_BANK]) );
2028 { /* MAC.W @Rm+, @Rn+ */
2029 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2030 load_reg( R_ECX, Rm );
2031 check_ralign16( R_ECX );
2032 load_reg( R_ECX, Rn );
2033 check_ralign16( R_ECX );
2034 ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rn]) );
2035 MEM_READ_WORD( R_ECX, R_EAX );
2037 load_reg( R_ECX, Rm );
2038 ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
2039 MEM_READ_WORD( R_ECX, R_EAX );
2043 load_spreg( R_ECX, R_S );
2044 TEST_r32_r32( R_ECX, R_ECX );
2045 JE_rel8( 47, nosat );
2047 ADD_r32_sh4r( R_EAX, R_MACL ); // 6
2048 JNO_rel8( 51, end ); // 2
2049 load_imm32( R_EDX, 1 ); // 5
2050 store_spreg( R_EDX, R_MACH ); // 6
2051 JS_rel8( 13, positive ); // 2
2052 load_imm32( R_EAX, 0x80000000 );// 5
2053 store_spreg( R_EAX, R_MACL ); // 6
2054 JMP_rel8( 25, end2 ); // 2
2056 JMP_TARGET(positive);
2057 load_imm32( R_EAX, 0x7FFFFFFF );// 5
2058 store_spreg( R_EAX, R_MACL ); // 6
2059 JMP_rel8( 12, end3); // 2
2062 ADD_r32_sh4r( R_EAX, R_MACL ); // 6
2063 ADC_r32_sh4r( R_EDX, R_MACH ); // 6
2072 { /* MOV.L @(disp, Rm), Rn */
2073 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<2;
2074 load_reg( R_ECX, Rm );
2075 ADD_imm8s_r32( disp, R_ECX );
2076 check_ralign32( R_ECX );
2077 MEM_READ_LONG( R_ECX, R_EAX );
2078 store_reg( R_EAX, Rn );
2084 { /* MOV.B @Rm, Rn */
2085 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2086 load_reg( R_ECX, Rm );
2087 MEM_READ_BYTE( R_ECX, R_EAX );
2088 store_reg( R_EAX, Rn );
2092 { /* MOV.W @Rm, Rn */
2093 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2094 load_reg( R_ECX, Rm );
2095 check_ralign16( R_ECX );
2096 MEM_READ_WORD( R_ECX, R_EAX );
2097 store_reg( R_EAX, Rn );
2101 { /* MOV.L @Rm, Rn */
2102 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2103 load_reg( R_ECX, Rm );
2104 check_ralign32( R_ECX );
2105 MEM_READ_LONG( R_ECX, R_EAX );
2106 store_reg( R_EAX, Rn );
2111 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2112 load_reg( R_EAX, Rm );
2113 store_reg( R_EAX, Rn );
2117 { /* MOV.B @Rm+, Rn */
2118 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2119 load_reg( R_ECX, Rm );
2120 MOV_r32_r32( R_ECX, R_EAX );
2121 ADD_imm8s_r32( 1, R_EAX );
2122 store_reg( R_EAX, Rm );
2123 MEM_READ_BYTE( R_ECX, R_EAX );
2124 store_reg( R_EAX, Rn );
2128 { /* MOV.W @Rm+, Rn */
2129 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2130 load_reg( R_EAX, Rm );
2131 check_ralign16( R_EAX );
2132 MOV_r32_r32( R_EAX, R_ECX );
2133 ADD_imm8s_r32( 2, R_EAX );
2134 store_reg( R_EAX, Rm );
2135 MEM_READ_WORD( R_ECX, R_EAX );
2136 store_reg( R_EAX, Rn );
2140 { /* MOV.L @Rm+, Rn */
2141 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2142 load_reg( R_EAX, Rm );
2143 check_ralign32( R_EAX );
2144 MOV_r32_r32( R_EAX, R_ECX );
2145 ADD_imm8s_r32( 4, R_EAX );
2146 store_reg( R_EAX, Rm );
2147 MEM_READ_LONG( R_ECX, R_EAX );
2148 store_reg( R_EAX, Rn );
2153 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2154 load_reg( R_EAX, Rm );
2156 store_reg( R_EAX, Rn );
2160 { /* SWAP.B Rm, Rn */
2161 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2162 load_reg( R_EAX, Rm );
2163 XCHG_r8_r8( R_AL, R_AH );
2164 store_reg( R_EAX, Rn );
2168 { /* SWAP.W Rm, Rn */
2169 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2170 load_reg( R_EAX, Rm );
2171 MOV_r32_r32( R_EAX, R_ECX );
2172 SHL_imm8_r32( 16, R_ECX );
2173 SHR_imm8_r32( 16, R_EAX );
2174 OR_r32_r32( R_EAX, R_ECX );
2175 store_reg( R_ECX, Rn );
2180 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2181 load_reg( R_EAX, Rm );
2182 XOR_r32_r32( R_ECX, R_ECX );
2184 SBB_r32_r32( R_EAX, R_ECX );
2185 store_reg( R_ECX, Rn );
2191 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2192 load_reg( R_EAX, Rm );
2194 store_reg( R_EAX, Rn );
2198 { /* EXTU.B Rm, Rn */
2199 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2200 load_reg( R_EAX, Rm );
2201 MOVZX_r8_r32( R_EAX, R_EAX );
2202 store_reg( R_EAX, Rn );
2206 { /* EXTU.W Rm, Rn */
2207 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2208 load_reg( R_EAX, Rm );
2209 MOVZX_r16_r32( R_EAX, R_EAX );
2210 store_reg( R_EAX, Rn );
2214 { /* EXTS.B Rm, Rn */
2215 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2216 load_reg( R_EAX, Rm );
2217 MOVSX_r8_r32( R_EAX, R_EAX );
2218 store_reg( R_EAX, Rn );
2222 { /* EXTS.W Rm, Rn */
2223 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2224 load_reg( R_EAX, Rm );
2225 MOVSX_r16_r32( R_EAX, R_EAX );
2226 store_reg( R_EAX, Rn );
2232 { /* ADD #imm, Rn */
2233 uint32_t Rn = ((ir>>8)&0xF); int32_t imm = SIGNEXT8(ir&0xFF);
2234 load_reg( R_EAX, Rn );
2235 ADD_imm8s_r32( imm, R_EAX );
2236 store_reg( R_EAX, Rn );
2240 switch( (ir&0xF00) >> 8 ) {
2242 { /* MOV.B R0, @(disp, Rn) */
2243 uint32_t Rn = ((ir>>4)&0xF); uint32_t disp = (ir&0xF);
2244 load_reg( R_EAX, 0 );
2245 load_reg( R_ECX, Rn );
2246 ADD_imm32_r32( disp, R_ECX );
2247 MEM_WRITE_BYTE( R_ECX, R_EAX );
2251 { /* MOV.W R0, @(disp, Rn) */
2252 uint32_t Rn = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<1;
2253 load_reg( R_ECX, Rn );
2254 load_reg( R_EAX, 0 );
2255 ADD_imm32_r32( disp, R_ECX );
2256 check_walign16( R_ECX );
2257 MEM_WRITE_WORD( R_ECX, R_EAX );
2261 { /* MOV.B @(disp, Rm), R0 */
2262 uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF);
2263 load_reg( R_ECX, Rm );
2264 ADD_imm32_r32( disp, R_ECX );
2265 MEM_READ_BYTE( R_ECX, R_EAX );
2266 store_reg( R_EAX, 0 );
2270 { /* MOV.W @(disp, Rm), R0 */
2271 uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<1;
2272 load_reg( R_ECX, Rm );
2273 ADD_imm32_r32( disp, R_ECX );
2274 check_ralign16( R_ECX );
2275 MEM_READ_WORD( R_ECX, R_EAX );
2276 store_reg( R_EAX, 0 );
2280 { /* CMP/EQ #imm, R0 */
2281 int32_t imm = SIGNEXT8(ir&0xFF);
2282 load_reg( R_EAX, 0 );
2283 CMP_imm8s_r32(imm, R_EAX);
2289 int32_t disp = SIGNEXT8(ir&0xFF)<<1;
2290 if( sh4_x86.in_delay_slot ) {
2293 load_imm32( R_EDI, pc + 2 );
2294 CMP_imm8s_sh4r( 0, R_T );
2295 JE_rel8( 5, nottaken );
2296 load_imm32( R_EDI, disp + pc + 4 );
2297 JMP_TARGET(nottaken);
2305 int32_t disp = SIGNEXT8(ir&0xFF)<<1;
2306 if( sh4_x86.in_delay_slot ) {
2309 load_imm32( R_EDI, pc + 2 );
2310 CMP_imm8s_sh4r( 0, R_T );
2311 JNE_rel8( 5, nottaken );
2312 load_imm32( R_EDI, disp + pc + 4 );
2313 JMP_TARGET(nottaken);
2321 int32_t disp = SIGNEXT8(ir&0xFF)<<1;
2322 if( sh4_x86.in_delay_slot ) {
2325 load_imm32( R_EDI, pc + 4 );
2326 CMP_imm8s_sh4r( 0, R_T );
2327 JE_rel8( 5, nottaken );
2328 load_imm32( R_EDI, disp + pc + 4 );
2329 JMP_TARGET(nottaken);
2330 sh4_x86.in_delay_slot = TRUE;
2337 int32_t disp = SIGNEXT8(ir&0xFF)<<1;
2338 if( sh4_x86.in_delay_slot ) {
2341 load_imm32( R_EDI, pc + 4 );
2342 CMP_imm8s_sh4r( 0, R_T );
2343 JNE_rel8( 5, nottaken );
2344 load_imm32( R_EDI, disp + pc + 4 );
2345 JMP_TARGET(nottaken);
2346 sh4_x86.in_delay_slot = TRUE;
2357 { /* MOV.W @(disp, PC), Rn */
2358 uint32_t Rn = ((ir>>8)&0xF); uint32_t disp = (ir&0xFF)<<1;
2359 if( sh4_x86.in_delay_slot ) {
2362 load_imm32( R_ECX, pc + disp + 4 );
2363 MEM_READ_WORD( R_ECX, R_EAX );
2364 store_reg( R_EAX, Rn );
2370 int32_t disp = SIGNEXT12(ir&0xFFF)<<1;
2371 if( sh4_x86.in_delay_slot ) {
2374 load_imm32( R_EDI, disp + pc + 4 );
2375 sh4_x86.in_delay_slot = TRUE;
2382 int32_t disp = SIGNEXT12(ir&0xFFF)<<1;
2383 if( sh4_x86.in_delay_slot ) {
2386 load_imm32( R_EAX, pc + 4 );
2387 store_spreg( R_EAX, R_PR );
2388 load_imm32( R_EDI, disp + pc + 4 );
2389 sh4_x86.in_delay_slot = TRUE;
2395 switch( (ir&0xF00) >> 8 ) {
2397 { /* MOV.B R0, @(disp, GBR) */
2398 uint32_t disp = (ir&0xFF);
2399 load_reg( R_EAX, 0 );
2400 load_spreg( R_ECX, R_GBR );
2401 ADD_imm32_r32( disp, R_ECX );
2402 MEM_WRITE_BYTE( R_ECX, R_EAX );
2406 { /* MOV.W R0, @(disp, GBR) */
2407 uint32_t disp = (ir&0xFF)<<1;
2408 load_spreg( R_ECX, R_GBR );
2409 load_reg( R_EAX, 0 );
2410 ADD_imm32_r32( disp, R_ECX );
2411 check_walign16( R_ECX );
2412 MEM_WRITE_WORD( R_ECX, R_EAX );
2416 { /* MOV.L R0, @(disp, GBR) */
2417 uint32_t disp = (ir&0xFF)<<2;
2418 load_spreg( R_ECX, R_GBR );
2419 load_reg( R_EAX, 0 );
2420 ADD_imm32_r32( disp, R_ECX );
2421 check_walign32( R_ECX );
2422 MEM_WRITE_LONG( R_ECX, R_EAX );
2427 uint32_t imm = (ir&0xFF);
2428 if( sh4_x86.in_delay_slot ) {
2432 call_func0( sh4_raise_trap );
2433 ADD_imm8s_r32( 4, R_ESP );
2438 { /* MOV.B @(disp, GBR), R0 */
2439 uint32_t disp = (ir&0xFF);
2440 load_spreg( R_ECX, R_GBR );
2441 ADD_imm32_r32( disp, R_ECX );
2442 MEM_READ_BYTE( R_ECX, R_EAX );
2443 store_reg( R_EAX, 0 );
2447 { /* MOV.W @(disp, GBR), R0 */
2448 uint32_t disp = (ir&0xFF)<<1;
2449 load_spreg( R_ECX, R_GBR );
2450 ADD_imm32_r32( disp, R_ECX );
2451 check_ralign16( R_ECX );
2452 MEM_READ_WORD( R_ECX, R_EAX );
2453 store_reg( R_EAX, 0 );
2457 { /* MOV.L @(disp, GBR), R0 */
2458 uint32_t disp = (ir&0xFF)<<2;
2459 load_spreg( R_ECX, R_GBR );
2460 ADD_imm32_r32( disp, R_ECX );
2461 check_ralign32( R_ECX );
2462 MEM_READ_LONG( R_ECX, R_EAX );
2463 store_reg( R_EAX, 0 );
2467 { /* MOVA @(disp, PC), R0 */
2468 uint32_t disp = (ir&0xFF)<<2;
2469 if( sh4_x86.in_delay_slot ) {
2472 load_imm32( R_ECX, (pc & 0xFFFFFFFC) + disp + 4 );
2473 store_reg( R_ECX, 0 );
2478 { /* TST #imm, R0 */
2479 uint32_t imm = (ir&0xFF);
2480 load_reg( R_EAX, 0 );
2481 TEST_imm32_r32( imm, R_EAX );
2486 { /* AND #imm, R0 */
2487 uint32_t imm = (ir&0xFF);
2488 load_reg( R_EAX, 0 );
2489 AND_imm32_r32(imm, R_EAX);
2490 store_reg( R_EAX, 0 );
2494 { /* XOR #imm, R0 */
2495 uint32_t imm = (ir&0xFF);
2496 load_reg( R_EAX, 0 );
2497 XOR_imm32_r32( imm, R_EAX );
2498 store_reg( R_EAX, 0 );
2503 uint32_t imm = (ir&0xFF);
2504 load_reg( R_EAX, 0 );
2505 OR_imm32_r32(imm, R_EAX);
2506 store_reg( R_EAX, 0 );
2510 { /* TST.B #imm, @(R0, GBR) */
2511 uint32_t imm = (ir&0xFF);
2512 load_reg( R_EAX, 0);
2513 load_reg( R_ECX, R_GBR);
2514 ADD_r32_r32( R_EAX, R_ECX );
2515 MEM_READ_BYTE( R_ECX, R_EAX );
2516 TEST_imm8_r8( imm, R_AL );
2521 { /* AND.B #imm, @(R0, GBR) */
2522 uint32_t imm = (ir&0xFF);
2523 load_reg( R_EAX, 0 );
2524 load_spreg( R_ECX, R_GBR );
2525 ADD_r32_r32( R_EAX, R_ECX );
2527 call_func0(sh4_read_byte);
2529 AND_imm32_r32(imm, R_EAX );
2530 MEM_WRITE_BYTE( R_ECX, R_EAX );
2534 { /* XOR.B #imm, @(R0, GBR) */
2535 uint32_t imm = (ir&0xFF);
2536 load_reg( R_EAX, 0 );
2537 load_spreg( R_ECX, R_GBR );
2538 ADD_r32_r32( R_EAX, R_ECX );
2540 call_func0(sh4_read_byte);
2542 XOR_imm32_r32( imm, R_EAX );
2543 MEM_WRITE_BYTE( R_ECX, R_EAX );
2547 { /* OR.B #imm, @(R0, GBR) */
2548 uint32_t imm = (ir&0xFF);
2549 load_reg( R_EAX, 0 );
2550 load_spreg( R_ECX, R_GBR );
2551 ADD_r32_r32( R_EAX, R_ECX );
2553 call_func0(sh4_read_byte);
2555 OR_imm32_r32(imm, R_EAX );
2556 MEM_WRITE_BYTE( R_ECX, R_EAX );
2562 { /* MOV.L @(disp, PC), Rn */
2563 uint32_t Rn = ((ir>>8)&0xF); uint32_t disp = (ir&0xFF)<<2;
2564 if( sh4_x86.in_delay_slot ) {
2567 uint32_t target = (pc & 0xFFFFFFFC) + disp + 4;
2568 char *ptr = mem_get_region(target);
2570 MOV_moff32_EAX( (uint32_t)ptr );
2572 load_imm32( R_ECX, target );
2573 MEM_READ_LONG( R_ECX, R_EAX );
2575 store_reg( R_EAX, Rn );
2580 { /* MOV #imm, Rn */
2581 uint32_t Rn = ((ir>>8)&0xF); int32_t imm = SIGNEXT8(ir&0xFF);
2582 load_imm32( R_EAX, imm );
2583 store_reg( R_EAX, Rn );
2589 { /* FADD FRm, FRn */
2590 uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2592 load_spreg( R_ECX, R_FPSCR );
2593 TEST_imm32_r32( FPSCR_PR, R_ECX );
2594 load_fr_bank( R_EDX );
2595 JNE_rel8(13,doubleprec);
2596 push_fr(R_EDX, FRm);
2597 push_fr(R_EDX, FRn);
2601 JMP_TARGET(doubleprec);
2602 push_dr(R_EDX, FRm);
2603 push_dr(R_EDX, FRn);
2610 { /* FSUB FRm, FRn */
2611 uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2613 load_spreg( R_ECX, R_FPSCR );
2614 TEST_imm32_r32( FPSCR_PR, R_ECX );
2615 load_fr_bank( R_EDX );
2616 JNE_rel8(13, doubleprec);
2617 push_fr(R_EDX, FRn);
2618 push_fr(R_EDX, FRm);
2622 JMP_TARGET(doubleprec);
2623 push_dr(R_EDX, FRn);
2624 push_dr(R_EDX, FRm);
2631 { /* FMUL FRm, FRn */
2632 uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2634 load_spreg( R_ECX, R_FPSCR );
2635 TEST_imm32_r32( FPSCR_PR, R_ECX );
2636 load_fr_bank( R_EDX );
2637 JNE_rel8(13, doubleprec);
2638 push_fr(R_EDX, FRm);
2639 push_fr(R_EDX, FRn);
2643 JMP_TARGET(doubleprec);
2644 push_dr(R_EDX, FRm);
2645 push_dr(R_EDX, FRn);
2652 { /* FDIV FRm, FRn */
2653 uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2655 load_spreg( R_ECX, R_FPSCR );
2656 TEST_imm32_r32( FPSCR_PR, R_ECX );
2657 load_fr_bank( R_EDX );
2658 JNE_rel8(13, doubleprec);
2659 push_fr(R_EDX, FRn);
2660 push_fr(R_EDX, FRm);
2664 JMP_TARGET(doubleprec);
2665 push_dr(R_EDX, FRn);
2666 push_dr(R_EDX, FRm);
2673 { /* FCMP/EQ FRm, FRn */
2674 uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2676 load_spreg( R_ECX, R_FPSCR );
2677 TEST_imm32_r32( FPSCR_PR, R_ECX );
2678 load_fr_bank( R_EDX );
2679 JNE_rel8(8, doubleprec);
2680 push_fr(R_EDX, FRm);
2681 push_fr(R_EDX, FRn);
2683 JMP_TARGET(doubleprec);
2684 push_dr(R_EDX, FRm);
2685 push_dr(R_EDX, FRn);
2693 { /* FCMP/GT FRm, FRn */
2694 uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2696 load_spreg( R_ECX, R_FPSCR );
2697 TEST_imm32_r32( FPSCR_PR, R_ECX );
2698 load_fr_bank( R_EDX );
2699 JNE_rel8(8, doubleprec);
2700 push_fr(R_EDX, FRm);
2701 push_fr(R_EDX, FRn);
2703 JMP_TARGET(doubleprec);
2704 push_dr(R_EDX, FRm);
2705 push_dr(R_EDX, FRn);
2713 { /* FMOV @(R0, Rm), FRn */
2714 uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2716 load_reg( R_EDX, Rm );
2717 ADD_sh4r_r32( REG_OFFSET(r[0]), R_EDX );
2718 check_ralign32( R_EDX );
2719 load_spreg( R_ECX, R_FPSCR );
2720 TEST_imm32_r32( FPSCR_SZ, R_ECX );
2721 JNE_rel8(19, doublesize);
2722 MEM_READ_LONG( R_EDX, R_EAX );
2723 load_fr_bank( R_ECX );
2724 store_fr( R_ECX, R_EAX, FRn );
2727 JMP_TARGET(doublesize);
2728 MEM_READ_DOUBLE( R_EDX, R_EAX, R_EDX );
2729 load_spreg( R_ECX, R_FPSCR ); // assume read_long clobbered it
2730 load_xf_bank( R_ECX );
2731 store_fr( R_ECX, R_EAX, FRn&0x0E );
2732 store_fr( R_ECX, R_EDX, FRn|0x01 );
2736 JMP_TARGET(doublesize);
2737 MEM_READ_DOUBLE( R_EDX, R_EAX, R_EDX );
2738 load_fr_bank( R_ECX );
2739 store_fr( R_ECX, R_EAX, FRn&0x0E );
2740 store_fr( R_ECX, R_EDX, FRn|0x01 );
2746 { /* FMOV FRm, @(R0, Rn) */
2747 uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2749 load_reg( R_EDX, Rn );
2750 ADD_sh4r_r32( REG_OFFSET(r[0]), R_EDX );
2751 check_walign32( R_EDX );
2752 load_spreg( R_ECX, R_FPSCR );
2753 TEST_imm32_r32( FPSCR_SZ, R_ECX );
2754 JNE_rel8(20, doublesize);
2755 load_fr_bank( R_ECX );
2756 load_fr( R_ECX, R_EAX, FRm );
2757 MEM_WRITE_LONG( R_EDX, R_EAX ); // 12
2759 JMP_rel8( 48, end );
2760 JMP_TARGET(doublesize);
2761 load_xf_bank( R_ECX );
2762 load_fr( R_ECX, R_EAX, FRm&0x0E );
2763 load_fr( R_ECX, R_ECX, FRm|0x01 );
2764 MEM_WRITE_DOUBLE( R_EDX, R_EAX, R_ECX );
2767 JMP_rel8( 39, end );
2768 JMP_TARGET(doublesize);
2769 load_fr_bank( R_ECX );
2770 load_fr( R_ECX, R_EAX, FRm&0x0E );
2771 load_fr( R_ECX, R_ECX, FRm|0x01 );
2772 MEM_WRITE_DOUBLE( R_EDX, R_EAX, R_ECX );
2778 { /* FMOV @Rm, FRn */
2779 uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2781 load_reg( R_EDX, Rm );
2782 check_ralign32( R_EDX );
2783 load_spreg( R_ECX, R_FPSCR );
2784 TEST_imm32_r32( FPSCR_SZ, R_ECX );
2785 JNE_rel8(19, doublesize);
2786 MEM_READ_LONG( R_EDX, R_EAX );
2787 load_fr_bank( R_ECX );
2788 store_fr( R_ECX, R_EAX, FRn );
2791 JMP_TARGET(doublesize);
2792 MEM_READ_DOUBLE( R_EDX, R_EAX, R_EDX );
2793 load_spreg( R_ECX, R_FPSCR ); // assume read_long clobbered it
2794 load_xf_bank( R_ECX );
2795 store_fr( R_ECX, R_EAX, FRn&0x0E );
2796 store_fr( R_ECX, R_EDX, FRn|0x01 );
2800 JMP_TARGET(doublesize);
2801 MEM_READ_DOUBLE( R_EDX, R_EAX, R_EDX );
2802 load_fr_bank( R_ECX );
2803 store_fr( R_ECX, R_EAX, FRn&0x0E );
2804 store_fr( R_ECX, R_EDX, FRn|0x01 );
2810 { /* FMOV @Rm+, FRn */
2811 uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2813 load_reg( R_EDX, Rm );
2814 check_ralign32( R_EDX );
2815 MOV_r32_r32( R_EDX, R_EAX );
2816 load_spreg( R_ECX, R_FPSCR );
2817 TEST_imm32_r32( FPSCR_SZ, R_ECX );
2818 JNE_rel8(25, doublesize);
2819 ADD_imm8s_r32( 4, R_EAX );
2820 store_reg( R_EAX, Rm );
2821 MEM_READ_LONG( R_EDX, R_EAX );
2822 load_fr_bank( R_ECX );
2823 store_fr( R_ECX, R_EAX, FRn );
2826 JMP_TARGET(doublesize);
2827 ADD_imm8s_r32( 8, R_EAX );
2828 store_reg(R_EAX, Rm);
2829 MEM_READ_DOUBLE( R_EDX, R_EAX, R_EDX );
2830 load_spreg( R_ECX, R_FPSCR ); // assume read_long clobbered it
2831 load_xf_bank( R_ECX );
2832 store_fr( R_ECX, R_EAX, FRn&0x0E );
2833 store_fr( R_ECX, R_EDX, FRn|0x01 );
2837 ADD_imm8s_r32( 8, R_EAX );
2838 store_reg(R_EAX, Rm);
2839 MEM_READ_DOUBLE( R_EDX, R_EAX, R_EDX );
2840 load_fr_bank( R_ECX );
2841 store_fr( R_ECX, R_EAX, FRn&0x0E );
2842 store_fr( R_ECX, R_EDX, FRn|0x01 );
2848 { /* FMOV FRm, @Rn */
2849 uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2851 load_reg( R_EDX, Rn );
2852 check_walign32( R_EDX );
2853 load_spreg( R_ECX, R_FPSCR );
2854 TEST_imm32_r32( FPSCR_SZ, R_ECX );
2855 JNE_rel8(20, doublesize);
2856 load_fr_bank( R_ECX );
2857 load_fr( R_ECX, R_EAX, FRm );
2858 MEM_WRITE_LONG( R_EDX, R_EAX ); // 12
2860 JMP_rel8( 48, end );
2861 JMP_TARGET(doublesize);
2862 load_xf_bank( R_ECX );
2863 load_fr( R_ECX, R_EAX, FRm&0x0E );
2864 load_fr( R_ECX, R_ECX, FRm|0x01 );
2865 MEM_WRITE_DOUBLE( R_EDX, R_EAX, R_ECX );
2868 JMP_rel8( 39, end );
2869 JMP_TARGET(doublesize);
2870 load_fr_bank( R_ECX );
2871 load_fr( R_ECX, R_EAX, FRm&0x0E );
2872 load_fr( R_ECX, R_ECX, FRm|0x01 );
2873 MEM_WRITE_DOUBLE( R_EDX, R_EAX, R_ECX );
2879 { /* FMOV FRm, @-Rn */
2880 uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2882 load_reg( R_EDX, Rn );
2883 check_walign32( R_EDX );
2884 load_spreg( R_ECX, R_FPSCR );
2885 TEST_imm32_r32( FPSCR_SZ, R_ECX );
2886 JNE_rel8(26, doublesize);
2887 load_fr_bank( R_ECX );
2888 load_fr( R_ECX, R_EAX, FRm );
2889 ADD_imm8s_r32(-4,R_EDX);
2890 store_reg( R_EDX, Rn );
2891 MEM_WRITE_LONG( R_EDX, R_EAX ); // 12
2893 JMP_rel8( 54, end );
2894 JMP_TARGET(doublesize);
2895 load_xf_bank( R_ECX );
2896 load_fr( R_ECX, R_EAX, FRm&0x0E );
2897 load_fr( R_ECX, R_ECX, FRm|0x01 );
2898 ADD_imm8s_r32(-8,R_EDX);
2899 store_reg( R_EDX, Rn );
2900 MEM_WRITE_DOUBLE( R_EDX, R_EAX, R_ECX );
2903 JMP_rel8( 45, end );
2904 JMP_TARGET(doublesize);
2905 load_fr_bank( R_ECX );
2906 load_fr( R_ECX, R_EAX, FRm&0x0E );
2907 load_fr( R_ECX, R_ECX, FRm|0x01 );
2908 ADD_imm8s_r32(-8,R_EDX);
2909 store_reg( R_EDX, Rn );
2910 MEM_WRITE_DOUBLE( R_EDX, R_EAX, R_ECX );
2916 { /* FMOV FRm, FRn */
2917 uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2918 /* As horrible as this looks, it's actually covering 5 separate cases:
2919 * 1. 32-bit fr-to-fr (PR=0)
2920 * 2. 64-bit dr-to-dr (PR=1, FRm&1 == 0, FRn&1 == 0 )
2921 * 3. 64-bit dr-to-xd (PR=1, FRm&1 == 0, FRn&1 == 1 )
2922 * 4. 64-bit xd-to-dr (PR=1, FRm&1 == 1, FRn&1 == 0 )
2923 * 5. 64-bit xd-to-xd (PR=1, FRm&1 == 1, FRn&1 == 1 )
2926 load_spreg( R_ECX, R_FPSCR );
2927 load_fr_bank( R_EDX );
2928 TEST_imm32_r32( FPSCR_SZ, R_ECX );
2929 JNE_rel8(8, doublesize);
2930 load_fr( R_EDX, R_EAX, FRm ); // PR=0 branch
2931 store_fr( R_EDX, R_EAX, FRn );
2934 JMP_TARGET(doublesize);
2935 load_xf_bank( R_ECX );
2936 load_fr( R_ECX, R_EAX, FRm-1 );
2938 load_fr( R_ECX, R_EDX, FRm );
2939 store_fr( R_ECX, R_EAX, FRn-1 );
2940 store_fr( R_ECX, R_EDX, FRn );
2941 } else /* FRn&1 == 0 */ {
2942 load_fr( R_ECX, R_ECX, FRm );
2943 store_fr( R_EDX, R_EAX, FRn );
2944 store_fr( R_EDX, R_ECX, FRn+1 );
2947 } else /* FRm&1 == 0 */ {
2950 load_xf_bank( R_ECX );
2951 load_fr( R_EDX, R_EAX, FRm );
2952 load_fr( R_EDX, R_EDX, FRm+1 );
2953 store_fr( R_ECX, R_EAX, FRn-1 );
2954 store_fr( R_ECX, R_EDX, FRn );
2956 } else /* FRn&1 == 0 */ {
2958 load_fr( R_EDX, R_EAX, FRm );
2959 load_fr( R_EDX, R_ECX, FRm+1 );
2960 store_fr( R_EDX, R_EAX, FRn );
2961 store_fr( R_EDX, R_ECX, FRn+1 );
2968 switch( (ir&0xF0) >> 4 ) {
2970 { /* FSTS FPUL, FRn */
2971 uint32_t FRn = ((ir>>8)&0xF);
2973 load_fr_bank( R_ECX );
2974 load_spreg( R_EAX, R_FPUL );
2975 store_fr( R_ECX, R_EAX, FRn );
2979 { /* FLDS FRm, FPUL */
2980 uint32_t FRm = ((ir>>8)&0xF);
2982 load_fr_bank( R_ECX );
2983 load_fr( R_ECX, R_EAX, FRm );
2984 store_spreg( R_EAX, R_FPUL );
2988 { /* FLOAT FPUL, FRn */
2989 uint32_t FRn = ((ir>>8)&0xF);
2991 load_spreg( R_ECX, R_FPSCR );
2992 load_spreg(R_EDX, REG_OFFSET(fr_bank));
2994 TEST_imm32_r32( FPSCR_PR, R_ECX );
2995 JNE_rel8(5, doubleprec);
2996 pop_fr( R_EDX, FRn );
2998 JMP_TARGET(doubleprec);
2999 pop_dr( R_EDX, FRn );
3004 { /* FTRC FRm, FPUL */
3005 uint32_t FRm = ((ir>>8)&0xF);
3007 load_spreg( R_ECX, R_FPSCR );
3008 load_fr_bank( R_EDX );
3009 TEST_imm32_r32( FPSCR_PR, R_ECX );
3010 JNE_rel8(5, doubleprec);
3011 push_fr( R_EDX, FRm );
3013 JMP_TARGET(doubleprec);
3014 push_dr( R_EDX, FRm );
3016 load_imm32( R_ECX, (uint32_t)&max_int );
3017 FILD_r32ind( R_ECX );
3019 JNA_rel8( 32, sat );
3020 load_imm32( R_ECX, (uint32_t)&min_int ); // 5
3021 FILD_r32ind( R_ECX ); // 2
3023 JAE_rel8( 21, sat2 ); // 2
3024 load_imm32( R_EAX, (uint32_t)&save_fcw );
3025 FNSTCW_r32ind( R_EAX );
3026 load_imm32( R_EDX, (uint32_t)&trunc_fcw );
3027 FLDCW_r32ind( R_EDX );
3028 FISTP_sh4r(R_FPUL); // 3
3029 FLDCW_r32ind( R_EAX );
3030 JMP_rel8( 9, end ); // 2
3034 MOV_r32ind_r32( R_ECX, R_ECX ); // 2
3035 store_spreg( R_ECX, R_FPUL );
3042 uint32_t FRn = ((ir>>8)&0xF);
3044 load_spreg( R_ECX, R_FPSCR );
3045 TEST_imm32_r32( FPSCR_PR, R_ECX );
3046 load_fr_bank( R_EDX );
3047 JNE_rel8(10, doubleprec);
3048 push_fr(R_EDX, FRn);
3052 JMP_TARGET(doubleprec);
3053 push_dr(R_EDX, FRn);
3061 uint32_t FRn = ((ir>>8)&0xF);
3063 load_spreg( R_ECX, R_FPSCR );
3064 load_fr_bank( R_EDX );
3065 TEST_imm32_r32( FPSCR_PR, R_ECX );
3066 JNE_rel8(10, doubleprec);
3067 push_fr(R_EDX, FRn); // 3
3069 pop_fr( R_EDX, FRn); //3
3070 JMP_rel8(8,end); // 2
3071 JMP_TARGET(doubleprec);
3072 push_dr(R_EDX, FRn);
3080 uint32_t FRn = ((ir>>8)&0xF);
3082 load_spreg( R_ECX, R_FPSCR );
3083 TEST_imm32_r32( FPSCR_PR, R_ECX );
3084 load_fr_bank( R_EDX );
3085 JNE_rel8(10, doubleprec);
3086 push_fr(R_EDX, FRn);
3090 JMP_TARGET(doubleprec);
3091 push_dr(R_EDX, FRn);
3099 uint32_t FRn = ((ir>>8)&0xF);
3101 load_spreg( R_ECX, R_FPSCR );
3102 TEST_imm32_r32( FPSCR_PR, R_ECX );
3103 load_fr_bank( R_EDX );
3104 JNE_rel8(12, end); // PR=0 only
3106 push_fr(R_EDX, FRn);
3115 uint32_t FRn = ((ir>>8)&0xF);
3118 load_spreg( R_ECX, R_FPSCR );
3119 TEST_imm32_r32( FPSCR_PR, R_ECX );
3121 XOR_r32_r32( R_EAX, R_EAX );
3122 load_spreg( R_ECX, REG_OFFSET(fr_bank) );
3123 store_fr( R_ECX, R_EAX, FRn );
3129 uint32_t FRn = ((ir>>8)&0xF);
3132 load_spreg( R_ECX, R_FPSCR );
3133 TEST_imm32_r32( FPSCR_PR, R_ECX );
3135 load_imm32(R_EAX, 0x3F800000);
3136 load_spreg( R_ECX, REG_OFFSET(fr_bank) );
3137 store_fr( R_ECX, R_EAX, FRn );
3142 { /* FCNVSD FPUL, FRn */
3143 uint32_t FRn = ((ir>>8)&0xF);
3145 load_spreg( R_ECX, R_FPSCR );
3146 TEST_imm32_r32( FPSCR_PR, R_ECX );
3147 JE_rel8(9, end); // only when PR=1
3148 load_fr_bank( R_ECX );
3150 pop_dr( R_ECX, FRn );
3155 { /* FCNVDS FRm, FPUL */
3156 uint32_t FRm = ((ir>>8)&0xF);
3158 load_spreg( R_ECX, R_FPSCR );
3159 TEST_imm32_r32( FPSCR_PR, R_ECX );
3160 JE_rel8(9, end); // only when PR=1
3161 load_fr_bank( R_ECX );
3162 push_dr( R_ECX, FRm );
3168 { /* FIPR FVm, FVn */
3169 uint32_t FVn = ((ir>>10)&0x3); uint32_t FVm = ((ir>>8)&0x3);
3171 load_spreg( R_ECX, R_FPSCR );
3172 TEST_imm32_r32( FPSCR_PR, R_ECX );
3173 JNE_rel8(44, doubleprec);
3175 load_fr_bank( R_ECX );
3176 push_fr( R_ECX, FVm<<2 );
3177 push_fr( R_ECX, FVn<<2 );
3179 push_fr( R_ECX, (FVm<<2)+1);
3180 push_fr( R_ECX, (FVn<<2)+1);
3183 push_fr( R_ECX, (FVm<<2)+2);
3184 push_fr( R_ECX, (FVn<<2)+2);
3187 push_fr( R_ECX, (FVm<<2)+3);
3188 push_fr( R_ECX, (FVn<<2)+3);
3191 pop_fr( R_ECX, (FVn<<2)+3);
3192 JMP_TARGET(doubleprec);
3196 switch( (ir&0x100) >> 8 ) {
3198 { /* FSCA FPUL, FRn */
3199 uint32_t FRn = ((ir>>9)&0x7)<<1;
3201 load_spreg( R_ECX, R_FPSCR );
3202 TEST_imm32_r32( FPSCR_PR, R_ECX );
3203 JNE_rel8( 21, doubleprec );
3204 load_fr_bank( R_ECX );
3205 ADD_imm8s_r32( (FRn&0x0E)<<2, R_ECX );
3206 load_spreg( R_EDX, R_FPUL );
3207 call_func2( sh4_fsca, R_EDX, R_ECX );
3208 JMP_TARGET(doubleprec);
3212 switch( (ir&0x200) >> 9 ) {
3214 { /* FTRV XMTRX, FVn */
3215 uint32_t FVn = ((ir>>10)&0x3);
3217 load_spreg( R_ECX, R_FPSCR );
3218 TEST_imm32_r32( FPSCR_PR, R_ECX );
3219 JNE_rel8( 30, doubleprec );
3220 load_fr_bank( R_EDX ); // 3
3221 ADD_imm8s_r32( FVn<<4, R_EDX ); // 3
3222 load_xf_bank( R_ECX ); // 12
3223 call_func2( sh4_ftrv, R_EDX, R_ECX ); // 12
3224 JMP_TARGET(doubleprec);
3228 switch( (ir&0xC00) >> 10 ) {
3232 load_spreg( R_ECX, R_FPSCR );
3233 XOR_imm32_r32( FPSCR_SZ, R_ECX );
3234 store_spreg( R_ECX, R_FPSCR );
3240 load_spreg( R_ECX, R_FPSCR );
3241 XOR_imm32_r32( FPSCR_FR, R_ECX );
3242 store_spreg( R_ECX, R_FPSCR );
3243 update_fr_bank( R_ECX );
3248 if( sh4_x86.in_delay_slot ) {
3251 JMP_exit(EXIT_ILLEGAL);
3271 { /* FMAC FR0, FRm, FRn */
3272 uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
3274 load_spreg( R_ECX, R_FPSCR );
3275 load_spreg( R_EDX, REG_OFFSET(fr_bank));
3276 TEST_imm32_r32( FPSCR_PR, R_ECX );
3277 JNE_rel8(18, doubleprec);
3278 push_fr( R_EDX, 0 );
3279 push_fr( R_EDX, FRm );
3281 push_fr( R_EDX, FRn );
3283 pop_fr( R_EDX, FRn );
3285 JMP_TARGET(doubleprec);
3286 push_dr( R_EDX, 0 );
3287 push_dr( R_EDX, FRm );
3289 push_dr( R_EDX, FRn );
3291 pop_dr( R_EDX, FRn );
3302 if( sh4_x86.in_delay_slot ) {
3303 ADD_imm8s_r32(2,R_ESI);
3304 sh4_x86.in_delay_slot = FALSE;
.