filename | src/sh4/sh4x86.c |
changeset | 388:13bae2fb0373 |
prev | 386:6fb10951326a |
next | 394:7eb172bfeefe |
author | nkeynes |
date | Tue Sep 18 09:12:30 2007 +0000 (16 years ago) |
permissions | -rw-r--r-- |
last change | Ensure correct end-of-block PC Handle syscalls |
view | annotate | diff | log | raw |
1 /**
2 * $Id: sh4x86.c,v 1.10 2007-09-18 08:59:00 nkeynes Exp $
3 *
4 * SH4 => x86 translation. This version does no real optimization, it just
5 * outputs straight-line x86 code - it mainly exists to provide a baseline
6 * to test the optimizing versions against.
7 *
8 * Copyright (c) 2007 Nathan Keynes.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */
#include <assert.h>
#include <math.h>
#include <stdlib.h>
24 #ifndef NDEBUG
25 #define DEBUG_JUMPS 1
26 #endif
28 #include "sh4/sh4core.h"
29 #include "sh4/sh4trans.h"
30 #include "sh4/sh4mmio.h"
31 #include "sh4/x86op.h"
32 #include "clock.h"
34 #define DEFAULT_BACKPATCH_SIZE 4096
36 /**
37 * Struct to manage internal translation state. This state is not saved -
38 * it is only valid between calls to sh4_translate_begin_block() and
39 * sh4_translate_end_block()
40 */
struct sh4_x86_state {
    gboolean in_delay_slot;  /* true while translating a branch delay slot */
    gboolean priv_checked; /* true if we've already checked the cpu mode. */
    gboolean fpuen_checked; /* true if we've already checked fpu enabled. */
    int exit_code;           /* value loaded into EAX when the block returns */

    /* Allocated memory for the (block-wide) back-patch list */
    uint32_t **backpatch_list;
    uint32_t backpatch_posn;   /* number of entries currently in the list */
    uint32_t backpatch_size;   /* capacity of the list, in entries */
};
53 #define EXIT_DATA_ADDR_READ 0
54 #define EXIT_DATA_ADDR_WRITE 7
55 #define EXIT_ILLEGAL 14
56 #define EXIT_SLOT_ILLEGAL 21
57 #define EXIT_FPU_DISABLED 28
58 #define EXIT_SLOT_FPU_DISABLED 35
60 static struct sh4_x86_state sh4_x86;
62 static uint32_t max_int = 0x7FFFFFFF;
63 static uint32_t min_int = 0x80000000;
64 void signsat48( void )
65 {
66 if( ((int64_t)sh4r.mac) < (int64_t)0xFFFF800000000000LL )
67 sh4r.mac = 0xFFFF800000000000LL;
68 else if( ((int64_t)sh4r.mac) > (int64_t)0x00007FFFFFFFFFFFLL )
69 sh4r.mac = 0x00007FFFFFFFFFFFLL;
70 }
72 void sh4_fsca( uint32_t anglei, float *fr )
73 {
74 float angle = (((float)(anglei&0xFFFF))/65536.0) * 2 * M_PI;
75 *fr++ = cosf(angle);
76 *fr = sinf(angle);
77 }
79 void sh4_sleep()
80 {
81 if( MMIO_READ( CPG, STBCR ) & 0x80 ) {
82 sh4r.sh4_state = SH4_STATE_STANDBY;
83 } else {
84 sh4r.sh4_state = SH4_STATE_SLEEP;
85 }
86 }
/**
 * Compute the matrix tranform of fv given the matrix xf.
 * Both fv and xf are word-swapped as per the sh4r.fr banks
 */
void sh4_ftrv( float *target, float *xf )
{
    /* Snapshot the (un-swapped) input vector before overwriting target */
    float fv[4] = { target[1], target[0], target[3], target[2] };
    int row;
    for( row = 0; row < 4; row++ ) {
        /* Same left-to-right accumulation order as the unrolled form */
        target[row] = xf[row] * fv[0] + xf[row+4]*fv[1] +
            xf[row+8]*fv[2] + xf[row+12]*fv[3];
    }
}
107 void sh4_x86_init()
108 {
109 sh4_x86.backpatch_list = malloc(DEFAULT_BACKPATCH_SIZE);
110 sh4_x86.backpatch_size = DEFAULT_BACKPATCH_SIZE / sizeof(uint32_t *);
111 }
114 static void sh4_x86_add_backpatch( uint8_t *ptr )
115 {
116 if( sh4_x86.backpatch_posn == sh4_x86.backpatch_size ) {
117 sh4_x86.backpatch_size <<= 1;
118 sh4_x86.backpatch_list = realloc( sh4_x86.backpatch_list, sh4_x86.backpatch_size * sizeof(uint32_t *) );
119 assert( sh4_x86.backpatch_list != NULL );
120 }
121 sh4_x86.backpatch_list[sh4_x86.backpatch_posn++] = (uint32_t *)ptr;
122 }
124 static void sh4_x86_do_backpatch( uint8_t *reloc_base )
125 {
126 unsigned int i;
127 for( i=0; i<sh4_x86.backpatch_posn; i++ ) {
128 *sh4_x86.backpatch_list[i] += (reloc_base - ((uint8_t *)sh4_x86.backpatch_list[i]) - 4);
129 }
130 }
/**
 * Emit an instruction to load an SH4 reg into a real register
 */
static inline void load_reg( int x86reg, int sh4reg )
{
    /* mov [bp+n], reg */
    OP(0x8B);                     // 8B /r = MOV r32, r/m32
    OP(0x45 + (x86reg<<3));       // modrm: [ebp+disp8], reg field = x86reg
    OP(REG_OFFSET(r[sh4reg]));    // disp8 = offset of r[sh4reg] in sh4r
}
/**
 * Emit an instruction to load the low 16 bits of an SH4 reg into a real
 * register, sign-extended to 32 bits.
 */
static inline void load_reg16s( int x86reg, int sh4reg )
{
    OP(0x0F);    // 0F BF /r = MOVSX r32, r/m16
    OP(0xBF);
    MODRM_r32_sh4r(x86reg, REG_OFFSET(r[sh4reg]));
}
/**
 * Emit an instruction to load the low 16 bits of an SH4 reg into a real
 * register, zero-extended to 32 bits.
 */
static inline void load_reg16u( int x86reg, int sh4reg )
{
    OP(0x0F);    // 0F B7 /r = MOVZX r32, r/m16
    OP(0xB7);
    MODRM_r32_sh4r(x86reg, REG_OFFSET(r[sh4reg]));

}
158 #define load_spreg( x86reg, regoff ) MOV_sh4r_r32( regoff, x86reg )
159 #define store_spreg( x86reg, regoff ) MOV_r32_sh4r( x86reg, regoff )
/**
 * Emit an instruction to load an immediate value into a register
 */
static inline void load_imm32( int x86reg, uint32_t value ) {
    /* mov #value, reg */
    OP(0xB8 + x86reg);   // B8+r = MOV r32, imm32
    OP32(value);
}
/**
 * Emit an instruction to store an SH4 reg (RN)
 */
void static inline store_reg( int x86reg, int sh4reg ) {
    /* mov reg, [bp+n] */
    OP(0x89);                     // 89 /r = MOV r/m32, r32
    OP(0x45 + (x86reg<<3));       // modrm: [ebp+disp8], reg field = x86reg
    OP(REG_OFFSET(r[sh4reg]));    // disp8 = offset of r[sh4reg] in sh4r
}
179 #define load_fr_bank(bankreg) load_spreg( bankreg, REG_OFFSET(fr_bank))
/**
 * Load an FR register (single-precision floating point) into an integer x86
 * register (eg for register-to-register moves)
 */
void static inline load_fr( int bankreg, int x86reg, int frm )
{
    // MOV x86reg, [bankreg + (frm^1)*4]
    // frm^1: registers are word-swapped within each pair (as per sh4r.fr)
    OP(0x8B); OP(0x40+bankreg+(x86reg<<3)); OP((frm^1)<<2);
}
/**
 * Store an FR register (single-precision floating point) into an integer x86
 * register (eg for register-to-register moves)
 */
void static inline store_fr( int bankreg, int x86reg, int frn )
{
    // MOV [bankreg + (frn^1)*4], x86reg
    // frn^1: registers are word-swapped within each pair (as per sh4r.fr)
    OP(0x89); OP(0x40+bankreg+(x86reg<<3)); OP((frn^1)<<2);
}
/**
 * Load a pointer to the back fp back into the specified x86 register. The
 * bankreg must have been previously loaded with FPSCR.
 * NB: 12 bytes
 */
static inline void load_xf_bank( int bankreg )
{
    NOT_r32( bankreg );    // Invert FPSCR.FR so we select the *other* bank
    SHR_imm8_r32( (21 - 6), bankreg ); // Extract bit 21 then *64 for bank size
    AND_imm8s_r32( 0x40, bankreg ); // Complete extraction
    OP(0x8D); OP(0x44+(bankreg<<3)); OP(0x28+bankreg); OP(REG_OFFSET(fr)); // LEA [ebp+bankreg+disp], bankreg
}
/**
 * Update the fr_bank pointer based on the current fpscr value.
 * fpscrreg must contain the FPSCR value and is clobbered in the process.
 */
static inline void update_fr_bank( int fpscrreg )
{
    SHR_imm8_r32( (21 - 6), fpscrreg ); // Extract bit 21 then *64 for bank size
    AND_imm8s_r32( 0x40, fpscrreg ); // Complete extraction
    OP(0x8D); OP(0x44+(fpscrreg<<3)); OP(0x28+fpscrreg); OP(REG_OFFSET(fr)); // LEA [ebp+fpscrreg+disp], fpscrreg
    store_spreg( fpscrreg, REG_OFFSET(fr_bank) );  // Save the computed bank pointer
}
/**
 * Push FPUL (as a 32-bit float) onto the FPU stack
 */
static inline void push_fpul( )
{
    OP(0xD9); OP(0x45); OP(R_FPUL);   // FLD.S [ebp + R_FPUL]
}
/**
 * Pop FPUL (as a 32-bit float) from the FPU stack
 */
static inline void pop_fpul( )
{
    OP(0xD9); OP(0x5D); OP(R_FPUL);   // FSTP.S [ebp + R_FPUL] (/3: store and pop)
}
/**
 * Push a 32-bit float onto the FPU stack, with bankreg previously loaded
 * with the location of the current fp bank.
 */
static inline void push_fr( int bankreg, int frm )
{
    OP(0xD9); OP(0x40 + bankreg); OP((frm^1)<<2); // FLD.S [bankreg + frm^1*4]
}
/**
 * Pop a 32-bit float from the FPU stack and store it back into the fp bank,
 * with bankreg previously loaded with the location of the current fp bank.
 */
static inline void pop_fr( int bankreg, int frm )
{
    OP(0xD9); OP(0x58 + bankreg); OP((frm^1)<<2); // FSTP.S [bankreg + frm^1*4] (/3: store and pop)
}
/**
 * Push a 64-bit double onto the FPU stack, with bankreg previously loaded
 * with the location of the current fp bank.
 */
static inline void push_dr( int bankreg, int frm )
{
    OP(0xDD); OP(0x40 + bankreg); OP(frm<<2); // FLD.D [bankreg + frm*4]
}
/**
 * Pop a 64-bit double from the FPU stack and store it back into the fp
 * bank, with bankreg previously loaded with the current bank location.
 */
static inline void pop_dr( int bankreg, int frm )
{
    OP(0xDD); OP(0x58 + bankreg); OP(frm<<2); // FSTP.D [bankreg + frm*4] (/3: store and pop)
}
/**
 * Emit an indirect call to the given C function (no arguments).
 * Note: clobbers EAX to make the indirect call - this isn't usually
 * a problem since the callee will usually clobber it anyway.
 */
static inline void call_func0( void *ptr )
{
    load_imm32(R_EAX, (uint32_t)ptr);
    CALL_r32(R_EAX);
}
/**
 * Emit a call to a 1-argument C function, with the argument taken from
 * the given x86 register. Clobbers EAX (see call_func0).
 */
static inline void call_func1( void *ptr, int arg1 )
{
    PUSH_r32(arg1);
    call_func0(ptr);
    ADD_imm8s_r32( 4, R_ESP );   // caller cleans up the argument (cdecl)
}
/**
 * Emit a call to a 2-argument C function, with the arguments taken from
 * the given x86 registers (pushed right-to-left). Clobbers EAX.
 */
static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    PUSH_r32(arg2);
    PUSH_r32(arg1);
    call_func0(ptr);
    ADD_imm8s_r32( 8, R_ESP );   // caller cleans up both arguments (cdecl)
}
/**
 * Write a double (64-bit) value into memory, with the first word in arg2a, and
 * the second in arg2b
 * NB: 30 bytes
 */
static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
{
    // Push the arguments for both sh4_write_long calls up front, so the
    // stack holds (top-down): addr, arg2a, addr+4, arg2b
    ADD_imm8s_r32( 4, addr );
    PUSH_r32(arg2b);
    PUSH_r32(addr);
    ADD_imm8s_r32( -4, addr );   // restore addr
    PUSH_r32(arg2a);
    PUSH_r32(addr);
    call_func0(sh4_write_long);  // write first word at addr
    ADD_imm8s_r32( 8, R_ESP );   // pop (addr, arg2a)
    call_func0(sh4_write_long);  // write second word at addr+4
    ADD_imm8s_r32( 8, R_ESP );   // pop (addr+4, arg2b)
}
/**
 * Read a double (64-bit) value from memory, writing the first word into arg2a
 * and the second into arg2b. The addr must not be in EAX
 * (the calls clobber EAX - see call_func0).
 * NB: 27 bytes
 */
static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
{
    PUSH_r32(addr);              // argument; also preserves addr across the call
    call_func0(sh4_read_long);   // first word -> EAX
    POP_r32(addr);
    PUSH_r32(R_EAX);             // stash the first word while reading the second
    ADD_imm8s_r32( 4, addr );
    PUSH_r32(addr);
    call_func0(sh4_read_long);   // second word -> EAX
    ADD_imm8s_r32( 4, R_ESP );   // discard the address argument
    MOV_r32_r32( R_EAX, arg2b );
    POP_r32(arg2a);              // recover the stashed first word
}
334 /* Exception checks - Note that all exception checks will clobber EAX */
335 static void check_priv( )
336 {
337 if( !sh4_x86.priv_checked ) {
338 sh4_x86.priv_checked = TRUE;
339 load_spreg( R_EAX, R_SR );
340 AND_imm32_r32( SR_MD, R_EAX );
341 if( sh4_x86.in_delay_slot ) {
342 JE_exit( EXIT_SLOT_ILLEGAL );
343 } else {
344 JE_exit( EXIT_ILLEGAL );
345 }
346 }
347 }
349 static void check_fpuen( )
350 {
351 if( !sh4_x86.fpuen_checked ) {
352 sh4_x86.fpuen_checked = TRUE;
353 load_spreg( R_EAX, R_SR );
354 AND_imm32_r32( SR_FD, R_EAX );
355 if( sh4_x86.in_delay_slot ) {
356 JNE_exit(EXIT_SLOT_FPU_DISABLED);
357 } else {
358 JNE_exit(EXIT_FPU_DISABLED);
359 }
360 }
361 }
/**
 * Emit a check that the address in x86reg is 16-bit aligned, exiting
 * with a data-address-error (read) code if not.
 */
static void check_ralign16( int x86reg )
{
    TEST_imm32_r32( 0x00000001, x86reg );
    JNE_exit(EXIT_DATA_ADDR_READ);
}
/**
 * Emit a check that the address in x86reg is 16-bit aligned, exiting
 * with a data-address-error (write) code if not.
 */
static void check_walign16( int x86reg )
{
    TEST_imm32_r32( 0x00000001, x86reg );
    JNE_exit(EXIT_DATA_ADDR_WRITE);
}
/**
 * Emit a check that the address in x86reg is 32-bit aligned, exiting
 * with a data-address-error (read) code if not.
 */
static void check_ralign32( int x86reg )
{
    TEST_imm32_r32( 0x00000003, x86reg );
    JNE_exit(EXIT_DATA_ADDR_READ);
}
/**
 * Emit a check that the address in x86reg is 32-bit aligned, exiting
 * with a data-address-error (write) code if not.
 */
static void check_walign32( int x86reg )
{
    TEST_imm32_r32( 0x00000003, x86reg );
    JNE_exit(EXIT_DATA_ADDR_WRITE);
}
386 #define UNDEF()
387 #define MEM_RESULT(value_reg) if(value_reg != R_EAX) { MOV_r32_r32(R_EAX,value_reg); }
388 #define MEM_READ_BYTE( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); MEM_RESULT(value_reg)
389 #define MEM_READ_WORD( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); MEM_RESULT(value_reg)
390 #define MEM_READ_LONG( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); MEM_RESULT(value_reg)
391 #define MEM_WRITE_BYTE( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg)
392 #define MEM_WRITE_WORD( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg)
393 #define MEM_WRITE_LONG( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg)
395 #define SLOTILLEGAL() JMP_exit(EXIT_SLOT_ILLEGAL); sh4_x86.in_delay_slot = FALSE; return 1;
/**
 * Emit the 'start of block' assembly. Sets up the stack frame and save
 * SI/DI as required
 */
void sh4_translate_begin_block()
{
    PUSH_r32(R_EBP);
    /* mov &sh4r, ebp */
    load_imm32( R_EBP, (uint32_t)&sh4r );   // EBP = base of the sh4 register file
    PUSH_r32(R_EDI);
    PUSH_r32(R_ESI);
    // ESI = 0: the multiplier used for cycle accounting at block exit
    // (see exit_block); presumably incremented per translated instruction
    XOR_r32_r32(R_ESI, R_ESI);

    // Reset the per-block translation state
    sh4_x86.in_delay_slot = FALSE;
    sh4_x86.priv_checked = FALSE;
    sh4_x86.fpuen_checked = FALSE;
    sh4_x86.backpatch_posn = 0;
    sh4_x86.exit_code = 1;
}
/**
 * Exit the block early (ie branch out), conditionally or otherwise
 */
void exit_block( )
{
    // Save the target PC (maintained in EDI) back into the sh4 state
    store_spreg( R_EDI, REG_OFFSET(pc) );
    // slice_cycle += sh4_cpu_period * ESI
    MOV_moff32_EAX( (uint32_t)&sh4_cpu_period );
    load_spreg( R_ECX, REG_OFFSET(slice_cycle) );
    MUL_r32( R_ESI );
    ADD_r32_r32( R_EAX, R_ECX );
    store_spreg( R_ECX, REG_OFFSET(slice_cycle) );
    // Return the block's exit code in EAX
    load_imm32( R_EAX, sh4_x86.exit_code );
    // Restore callee-saved registers pushed in sh4_translate_begin_block
    POP_r32(R_ESI);
    POP_r32(R_EDI);
    POP_r32(R_EBP);
    RET();
}
/**
 * Flush any open regs back to memory, restore SI/DI/, update PC, etc
 */
void sh4_translate_end_block( sh4addr_t pc ) {
    assert( !sh4_x86.in_delay_slot ); // should never stop here
    // Normal termination - save PC, cycle count
    exit_block( );

    if( sh4_x86.backpatch_posn != 0 ) {
        uint8_t *end_ptr = xlat_output;
        // Exception termination. Jump block for various exception codes:
        // each PUSH imm32 (5 bytes) + JMP rel8 (2 bytes) stanza is 7 bytes,
        // matching the EXIT_* offsets used by the JE_exit/JNE_exit checks.
        PUSH_imm32( EXC_DATA_ADDR_READ );
        JMP_rel8( 33, target1 );   // skip the remaining stanzas (5*7 - 2)
        PUSH_imm32( EXC_DATA_ADDR_WRITE );
        JMP_rel8( 26, target2 );
        PUSH_imm32( EXC_ILLEGAL );
        JMP_rel8( 19, target3 );
        PUSH_imm32( EXC_SLOT_ILLEGAL );
        JMP_rel8( 12, target4 );
        PUSH_imm32( EXC_FPU_DISABLED );
        JMP_rel8( 5, target5 );
        PUSH_imm32( EXC_SLOT_FPU_DISABLED );
        // target
        JMP_TARGET(target1);
        JMP_TARGET(target2);
        JMP_TARGET(target3);
        JMP_TARGET(target4);
        JMP_TARGET(target5);
        // Update PC: pc += 2*ESI (two ADDs of ESI)
        load_spreg( R_ECX, REG_OFFSET(pc) );
        ADD_r32_r32( R_ESI, R_ECX );
        ADD_r32_r32( R_ESI, R_ECX );
        store_spreg( R_ECX, REG_OFFSET(pc) );
        // slice_cycle += sh4_cpu_period * ESI (as in exit_block)
        MOV_moff32_EAX( (uint32_t)&sh4_cpu_period );
        load_spreg( R_ECX, REG_OFFSET(slice_cycle) );
        MUL_r32( R_ESI );
        ADD_r32_r32( R_EAX, R_ECX );
        store_spreg( R_ECX, REG_OFFSET(slice_cycle) );

        // Raise the exception code pushed above, then unwind and return
        load_imm32( R_EAX, (uint32_t)sh4_raise_exception ); // 6
        CALL_r32( R_EAX ); // 2
        ADD_imm8s_r32( 4, R_ESP );   // pop the exception-code argument
        POP_r32(R_ESI);
        POP_r32(R_EDI);
        POP_r32(R_EBP);
        RET();

        // Retarget every recorded JE_exit/JNE_exit/JMP_exit branch at the
        // stanza block emitted above
        sh4_x86_do_backpatch( end_ptr );
    }

}
489 extern uint16_t *sh4_icache;
490 extern uint32_t sh4_icache_addr;
492 /**
493 * Translate a single instruction. Delayed branches are handled specially
494 * by translating both branch and delayed instruction as a single unit (as
495 *
496 *
497 * @return true if the instruction marks the end of a basic block
498 * (eg a branch or
499 */
500 uint32_t sh4_x86_translate_instruction( uint32_t pc )
501 {
502 uint32_t ir;
503 /* Read instruction */
504 uint32_t pageaddr = pc >> 12;
505 if( sh4_icache != NULL && pageaddr == sh4_icache_addr ) {
506 ir = sh4_icache[(pc&0xFFF)>>1];
507 } else {
508 sh4_icache = (uint16_t *)mem_get_page(pc);
509 if( ((uint32_t)sh4_icache) < MAX_IO_REGIONS ) {
510 /* If someone's actually been so daft as to try to execute out of an IO
511 * region, fallback on the full-blown memory read
512 */
513 sh4_icache = NULL;
514 ir = sh4_read_word(pc);
515 } else {
516 sh4_icache_addr = pageaddr;
517 ir = sh4_icache[(pc&0xFFF)>>1];
518 }
519 }
521 switch( (ir&0xF000) >> 12 ) {
522 case 0x0:
523 switch( ir&0xF ) {
524 case 0x2:
525 switch( (ir&0x80) >> 7 ) {
526 case 0x0:
527 switch( (ir&0x70) >> 4 ) {
528 case 0x0:
529 { /* STC SR, Rn */
530 uint32_t Rn = ((ir>>8)&0xF);
531 check_priv();
532 call_func0(sh4_read_sr);
533 store_reg( R_EAX, Rn );
534 }
535 break;
536 case 0x1:
537 { /* STC GBR, Rn */
538 uint32_t Rn = ((ir>>8)&0xF);
539 load_spreg( R_EAX, R_GBR );
540 store_reg( R_EAX, Rn );
541 }
542 break;
543 case 0x2:
544 { /* STC VBR, Rn */
545 uint32_t Rn = ((ir>>8)&0xF);
546 check_priv();
547 load_spreg( R_EAX, R_VBR );
548 store_reg( R_EAX, Rn );
549 }
550 break;
551 case 0x3:
552 { /* STC SSR, Rn */
553 uint32_t Rn = ((ir>>8)&0xF);
554 check_priv();
555 load_spreg( R_EAX, R_SSR );
556 store_reg( R_EAX, Rn );
557 }
558 break;
559 case 0x4:
560 { /* STC SPC, Rn */
561 uint32_t Rn = ((ir>>8)&0xF);
562 check_priv();
563 load_spreg( R_EAX, R_SPC );
564 store_reg( R_EAX, Rn );
565 }
566 break;
567 default:
568 UNDEF();
569 break;
570 }
571 break;
572 case 0x1:
573 { /* STC Rm_BANK, Rn */
574 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm_BANK = ((ir>>4)&0x7);
575 check_priv();
576 load_spreg( R_EAX, REG_OFFSET(r_bank[Rm_BANK]) );
577 store_reg( R_EAX, Rn );
578 }
579 break;
580 }
581 break;
582 case 0x3:
583 switch( (ir&0xF0) >> 4 ) {
584 case 0x0:
585 { /* BSRF Rn */
586 uint32_t Rn = ((ir>>8)&0xF);
587 if( sh4_x86.in_delay_slot ) {
588 SLOTILLEGAL();
589 } else {
590 load_imm32( R_EAX, pc + 4 );
591 store_spreg( R_EAX, R_PR );
592 load_reg( R_EDI, Rn );
593 ADD_r32_r32( R_EAX, R_EDI );
594 sh4_x86.in_delay_slot = TRUE;
595 return 0;
596 }
597 }
598 break;
599 case 0x2:
600 { /* BRAF Rn */
601 uint32_t Rn = ((ir>>8)&0xF);
602 if( sh4_x86.in_delay_slot ) {
603 SLOTILLEGAL();
604 } else {
605 load_reg( R_EDI, Rn );
606 ADD_imm32_r32( pc + 4, R_EDI );
607 sh4_x86.in_delay_slot = TRUE;
608 return 0;
609 }
610 }
611 break;
612 case 0x8:
613 { /* PREF @Rn */
614 uint32_t Rn = ((ir>>8)&0xF);
615 load_reg( R_EAX, Rn );
616 PUSH_r32( R_EAX );
617 AND_imm32_r32( 0xFC000000, R_EAX );
618 CMP_imm32_r32( 0xE0000000, R_EAX );
619 JNE_rel8(7, end);
620 call_func0( sh4_flush_store_queue );
621 JMP_TARGET(end);
622 ADD_imm8s_r32( 4, R_ESP );
623 }
624 break;
625 case 0x9:
626 { /* OCBI @Rn */
627 uint32_t Rn = ((ir>>8)&0xF);
628 }
629 break;
630 case 0xA:
631 { /* OCBP @Rn */
632 uint32_t Rn = ((ir>>8)&0xF);
633 }
634 break;
635 case 0xB:
636 { /* OCBWB @Rn */
637 uint32_t Rn = ((ir>>8)&0xF);
638 }
639 break;
640 case 0xC:
641 { /* MOVCA.L R0, @Rn */
642 uint32_t Rn = ((ir>>8)&0xF);
643 load_reg( R_EAX, 0 );
644 load_reg( R_ECX, Rn );
645 check_walign32( R_ECX );
646 MEM_WRITE_LONG( R_ECX, R_EAX );
647 }
648 break;
649 default:
650 UNDEF();
651 break;
652 }
653 break;
654 case 0x4:
655 { /* MOV.B Rm, @(R0, Rn) */
656 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
657 load_reg( R_EAX, 0 );
658 load_reg( R_ECX, Rn );
659 ADD_r32_r32( R_EAX, R_ECX );
660 load_reg( R_EAX, Rm );
661 MEM_WRITE_BYTE( R_ECX, R_EAX );
662 }
663 break;
664 case 0x5:
665 { /* MOV.W Rm, @(R0, Rn) */
666 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
667 load_reg( R_EAX, 0 );
668 load_reg( R_ECX, Rn );
669 ADD_r32_r32( R_EAX, R_ECX );
670 check_walign16( R_ECX );
671 load_reg( R_EAX, Rm );
672 MEM_WRITE_WORD( R_ECX, R_EAX );
673 }
674 break;
675 case 0x6:
676 { /* MOV.L Rm, @(R0, Rn) */
677 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
678 load_reg( R_EAX, 0 );
679 load_reg( R_ECX, Rn );
680 ADD_r32_r32( R_EAX, R_ECX );
681 check_walign32( R_ECX );
682 load_reg( R_EAX, Rm );
683 MEM_WRITE_LONG( R_ECX, R_EAX );
684 }
685 break;
686 case 0x7:
687 { /* MUL.L Rm, Rn */
688 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
689 load_reg( R_EAX, Rm );
690 load_reg( R_ECX, Rn );
691 MUL_r32( R_ECX );
692 store_spreg( R_EAX, R_MACL );
693 }
694 break;
695 case 0x8:
696 switch( (ir&0xFF0) >> 4 ) {
697 case 0x0:
698 { /* CLRT */
699 CLC();
700 SETC_t();
701 }
702 break;
703 case 0x1:
704 { /* SETT */
705 STC();
706 SETC_t();
707 }
708 break;
709 case 0x2:
710 { /* CLRMAC */
711 XOR_r32_r32(R_EAX, R_EAX);
712 store_spreg( R_EAX, R_MACL );
713 store_spreg( R_EAX, R_MACH );
714 }
715 break;
716 case 0x3:
717 { /* LDTLB */
718 }
719 break;
720 case 0x4:
721 { /* CLRS */
722 CLC();
723 SETC_sh4r(R_S);
724 }
725 break;
726 case 0x5:
727 { /* SETS */
728 STC();
729 SETC_sh4r(R_S);
730 }
731 break;
732 default:
733 UNDEF();
734 break;
735 }
736 break;
737 case 0x9:
738 switch( (ir&0xF0) >> 4 ) {
739 case 0x0:
740 { /* NOP */
741 /* Do nothing. Well, we could emit an 0x90, but what would really be the point? */
742 }
743 break;
744 case 0x1:
745 { /* DIV0U */
746 XOR_r32_r32( R_EAX, R_EAX );
747 store_spreg( R_EAX, R_Q );
748 store_spreg( R_EAX, R_M );
749 store_spreg( R_EAX, R_T );
750 }
751 break;
752 case 0x2:
753 { /* MOVT Rn */
754 uint32_t Rn = ((ir>>8)&0xF);
755 load_spreg( R_EAX, R_T );
756 store_reg( R_EAX, Rn );
757 }
758 break;
759 default:
760 UNDEF();
761 break;
762 }
763 break;
764 case 0xA:
765 switch( (ir&0xF0) >> 4 ) {
766 case 0x0:
767 { /* STS MACH, Rn */
768 uint32_t Rn = ((ir>>8)&0xF);
769 load_spreg( R_EAX, R_MACH );
770 store_reg( R_EAX, Rn );
771 }
772 break;
773 case 0x1:
774 { /* STS MACL, Rn */
775 uint32_t Rn = ((ir>>8)&0xF);
776 load_spreg( R_EAX, R_MACL );
777 store_reg( R_EAX, Rn );
778 }
779 break;
780 case 0x2:
781 { /* STS PR, Rn */
782 uint32_t Rn = ((ir>>8)&0xF);
783 load_spreg( R_EAX, R_PR );
784 store_reg( R_EAX, Rn );
785 }
786 break;
787 case 0x3:
788 { /* STC SGR, Rn */
789 uint32_t Rn = ((ir>>8)&0xF);
790 check_priv();
791 load_spreg( R_EAX, R_SGR );
792 store_reg( R_EAX, Rn );
793 }
794 break;
795 case 0x5:
796 { /* STS FPUL, Rn */
797 uint32_t Rn = ((ir>>8)&0xF);
798 load_spreg( R_EAX, R_FPUL );
799 store_reg( R_EAX, Rn );
800 }
801 break;
802 case 0x6:
803 { /* STS FPSCR, Rn */
804 uint32_t Rn = ((ir>>8)&0xF);
805 load_spreg( R_EAX, R_FPSCR );
806 store_reg( R_EAX, Rn );
807 }
808 break;
809 case 0xF:
810 { /* STC DBR, Rn */
811 uint32_t Rn = ((ir>>8)&0xF);
812 check_priv();
813 load_spreg( R_EAX, R_DBR );
814 store_reg( R_EAX, Rn );
815 }
816 break;
817 default:
818 UNDEF();
819 break;
820 }
821 break;
822 case 0xB:
823 switch( (ir&0xFF0) >> 4 ) {
824 case 0x0:
825 { /* RTS */
826 if( sh4_x86.in_delay_slot ) {
827 SLOTILLEGAL();
828 } else {
829 load_spreg( R_EDI, R_PR );
830 sh4_x86.in_delay_slot = TRUE;
831 return 0;
832 }
833 }
834 break;
835 case 0x1:
836 { /* SLEEP */
837 check_priv();
838 call_func0( sh4_sleep );
839 sh4_x86.exit_code = 0;
840 sh4_x86.in_delay_slot = FALSE;
841 return 1;
842 }
843 break;
844 case 0x2:
845 { /* RTE */
846 check_priv();
847 if( sh4_x86.in_delay_slot ) {
848 SLOTILLEGAL();
849 } else {
850 load_spreg( R_EDI, R_SPC );
851 load_spreg( R_EAX, R_SSR );
852 call_func1( sh4_write_sr, R_EAX );
853 sh4_x86.in_delay_slot = TRUE;
854 sh4_x86.priv_checked = FALSE;
855 sh4_x86.fpuen_checked = FALSE;
856 return 0;
857 }
858 }
859 break;
860 default:
861 UNDEF();
862 break;
863 }
864 break;
865 case 0xC:
866 { /* MOV.B @(R0, Rm), Rn */
867 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
868 load_reg( R_EAX, 0 );
869 load_reg( R_ECX, Rm );
870 ADD_r32_r32( R_EAX, R_ECX );
871 MEM_READ_BYTE( R_ECX, R_EAX );
872 store_reg( R_EAX, Rn );
873 }
874 break;
875 case 0xD:
876 { /* MOV.W @(R0, Rm), Rn */
877 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
878 load_reg( R_EAX, 0 );
879 load_reg( R_ECX, Rm );
880 ADD_r32_r32( R_EAX, R_ECX );
881 check_ralign16( R_ECX );
882 MEM_READ_WORD( R_ECX, R_EAX );
883 store_reg( R_EAX, Rn );
884 }
885 break;
886 case 0xE:
887 { /* MOV.L @(R0, Rm), Rn */
888 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
889 load_reg( R_EAX, 0 );
890 load_reg( R_ECX, Rm );
891 ADD_r32_r32( R_EAX, R_ECX );
892 check_ralign32( R_ECX );
893 MEM_READ_LONG( R_ECX, R_EAX );
894 store_reg( R_EAX, Rn );
895 }
896 break;
897 case 0xF:
898 { /* MAC.L @Rm+, @Rn+ */
899 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
900 load_reg( R_ECX, Rm );
901 check_ralign32( R_ECX );
902 load_reg( R_ECX, Rn );
903 check_ralign32( R_ECX );
904 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
905 MEM_READ_LONG( R_ECX, R_EAX );
906 PUSH_r32( R_EAX );
907 load_reg( R_ECX, Rm );
908 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
909 MEM_READ_LONG( R_ECX, R_EAX );
910 POP_r32( R_ECX );
911 IMUL_r32( R_ECX );
912 ADD_r32_sh4r( R_EAX, R_MACL );
913 ADC_r32_sh4r( R_EDX, R_MACH );
915 load_spreg( R_ECX, R_S );
916 TEST_r32_r32(R_ECX, R_ECX);
917 JE_rel8( 7, nosat );
918 call_func0( signsat48 );
919 JMP_TARGET( nosat );
920 }
921 break;
922 default:
923 UNDEF();
924 break;
925 }
926 break;
927 case 0x1:
928 { /* MOV.L Rm, @(disp, Rn) */
929 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<2;
930 load_reg( R_ECX, Rn );
931 load_reg( R_EAX, Rm );
932 ADD_imm32_r32( disp, R_ECX );
933 check_walign32( R_ECX );
934 MEM_WRITE_LONG( R_ECX, R_EAX );
935 }
936 break;
937 case 0x2:
938 switch( ir&0xF ) {
939 case 0x0:
940 { /* MOV.B Rm, @Rn */
941 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
942 load_reg( R_EAX, Rm );
943 load_reg( R_ECX, Rn );
944 MEM_WRITE_BYTE( R_ECX, R_EAX );
945 }
946 break;
947 case 0x1:
948 { /* MOV.W Rm, @Rn */
949 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
950 load_reg( R_ECX, Rn );
951 check_walign16( R_ECX );
952 load_reg( R_EAX, Rm );
953 MEM_WRITE_WORD( R_ECX, R_EAX );
954 }
955 break;
956 case 0x2:
957 { /* MOV.L Rm, @Rn */
958 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
959 load_reg( R_EAX, Rm );
960 load_reg( R_ECX, Rn );
961 check_walign32(R_ECX);
962 MEM_WRITE_LONG( R_ECX, R_EAX );
963 }
964 break;
965 case 0x4:
966 { /* MOV.B Rm, @-Rn */
967 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
968 load_reg( R_EAX, Rm );
969 load_reg( R_ECX, Rn );
970 ADD_imm8s_r32( -1, R_ECX );
971 store_reg( R_ECX, Rn );
972 MEM_WRITE_BYTE( R_ECX, R_EAX );
973 }
974 break;
975 case 0x5:
976 { /* MOV.W Rm, @-Rn */
977 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
978 load_reg( R_ECX, Rn );
979 check_walign16( R_ECX );
980 load_reg( R_EAX, Rm );
981 ADD_imm8s_r32( -2, R_ECX );
982 store_reg( R_ECX, Rn );
983 MEM_WRITE_WORD( R_ECX, R_EAX );
984 }
985 break;
986 case 0x6:
987 { /* MOV.L Rm, @-Rn */
988 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
989 load_reg( R_EAX, Rm );
990 load_reg( R_ECX, Rn );
991 check_walign32( R_ECX );
992 ADD_imm8s_r32( -4, R_ECX );
993 store_reg( R_ECX, Rn );
994 MEM_WRITE_LONG( R_ECX, R_EAX );
995 }
996 break;
997 case 0x7:
998 { /* DIV0S Rm, Rn */
999 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1000 load_reg( R_EAX, Rm );
1001 load_reg( R_ECX, Rn );
1002 SHR_imm8_r32( 31, R_EAX );
1003 SHR_imm8_r32( 31, R_ECX );
1004 store_spreg( R_EAX, R_M );
1005 store_spreg( R_ECX, R_Q );
1006 CMP_r32_r32( R_EAX, R_ECX );
1007 SETNE_t();
1008 }
1009 break;
1010 case 0x8:
1011 { /* TST Rm, Rn */
1012 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1013 load_reg( R_EAX, Rm );
1014 load_reg( R_ECX, Rn );
1015 TEST_r32_r32( R_EAX, R_ECX );
1016 SETE_t();
1017 }
1018 break;
1019 case 0x9:
1020 { /* AND Rm, Rn */
1021 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1022 load_reg( R_EAX, Rm );
1023 load_reg( R_ECX, Rn );
1024 AND_r32_r32( R_EAX, R_ECX );
1025 store_reg( R_ECX, Rn );
1026 }
1027 break;
1028 case 0xA:
1029 { /* XOR Rm, Rn */
1030 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1031 load_reg( R_EAX, Rm );
1032 load_reg( R_ECX, Rn );
1033 XOR_r32_r32( R_EAX, R_ECX );
1034 store_reg( R_ECX, Rn );
1035 }
1036 break;
1037 case 0xB:
1038 { /* OR Rm, Rn */
1039 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1040 load_reg( R_EAX, Rm );
1041 load_reg( R_ECX, Rn );
1042 OR_r32_r32( R_EAX, R_ECX );
1043 store_reg( R_ECX, Rn );
1044 }
1045 break;
1046 case 0xC:
1047 { /* CMP/STR Rm, Rn */
1048 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1049 load_reg( R_EAX, Rm );
1050 load_reg( R_ECX, Rn );
1051 XOR_r32_r32( R_ECX, R_EAX );
1052 TEST_r8_r8( R_AL, R_AL );
1053 JE_rel8(13, target1);
1054 TEST_r8_r8( R_AH, R_AH ); // 2
1055 JE_rel8(9, target2);
1056 SHR_imm8_r32( 16, R_EAX ); // 3
1057 TEST_r8_r8( R_AL, R_AL ); // 2
1058 JE_rel8(2, target3);
1059 TEST_r8_r8( R_AH, R_AH ); // 2
1060 JMP_TARGET(target1);
1061 JMP_TARGET(target2);
1062 JMP_TARGET(target3);
1063 SETE_t();
1064 }
1065 break;
1066 case 0xD:
1067 { /* XTRCT Rm, Rn */
1068 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1069 load_reg( R_EAX, Rm );
1070 MOV_r32_r32( R_EAX, R_ECX );
1071 SHR_imm8_r32( 16, R_EAX );
1072 SHL_imm8_r32( 16, R_ECX );
1073 OR_r32_r32( R_EAX, R_ECX );
1074 store_reg( R_ECX, Rn );
1075 }
1076 break;
1077 case 0xE:
1078 { /* MULU.W Rm, Rn */
1079 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1080 load_reg16u( R_EAX, Rm );
1081 load_reg16u( R_ECX, Rn );
1082 MUL_r32( R_ECX );
1083 store_spreg( R_EAX, R_MACL );
1084 }
1085 break;
1086 case 0xF:
1087 { /* MULS.W Rm, Rn */
1088 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1089 load_reg16s( R_EAX, Rm );
1090 load_reg16s( R_ECX, Rn );
1091 MUL_r32( R_ECX );
1092 store_spreg( R_EAX, R_MACL );
1093 }
1094 break;
1095 default:
1096 UNDEF();
1097 break;
1098 }
1099 break;
1100 case 0x3:
1101 switch( ir&0xF ) {
1102 case 0x0:
1103 { /* CMP/EQ Rm, Rn */
1104 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1105 load_reg( R_EAX, Rm );
1106 load_reg( R_ECX, Rn );
1107 CMP_r32_r32( R_EAX, R_ECX );
1108 SETE_t();
1109 }
1110 break;
1111 case 0x2:
1112 { /* CMP/HS Rm, Rn */
1113 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1114 load_reg( R_EAX, Rm );
1115 load_reg( R_ECX, Rn );
1116 CMP_r32_r32( R_EAX, R_ECX );
1117 SETAE_t();
1118 }
1119 break;
1120 case 0x3:
1121 { /* CMP/GE Rm, Rn */
1122 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1123 load_reg( R_EAX, Rm );
1124 load_reg( R_ECX, Rn );
1125 CMP_r32_r32( R_EAX, R_ECX );
1126 SETGE_t();
1127 }
1128 break;
1129 case 0x4:
1130 { /* DIV1 Rm, Rn */
1131 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1132 load_spreg( R_ECX, R_M );
1133 load_reg( R_EAX, Rn );
1134 LDC_t();
1135 RCL1_r32( R_EAX );
1136 SETC_r8( R_DL ); // Q'
1137 CMP_sh4r_r32( R_Q, R_ECX );
1138 JE_rel8(5, mqequal);
1139 ADD_sh4r_r32( REG_OFFSET(r[Rm]), R_EAX );
1140 JMP_rel8(3, end);
1141 JMP_TARGET(mqequal);
1142 SUB_sh4r_r32( REG_OFFSET(r[Rm]), R_EAX );
1143 JMP_TARGET(end);
1144 store_reg( R_EAX, Rn ); // Done with Rn now
1145 SETC_r8(R_AL); // tmp1
1146 XOR_r8_r8( R_DL, R_AL ); // Q' = Q ^ tmp1
1147 XOR_r8_r8( R_AL, R_CL ); // Q'' = Q' ^ M
1148 store_spreg( R_ECX, R_Q );
1149 XOR_imm8s_r32( 1, R_AL ); // T = !Q'
1150 MOVZX_r8_r32( R_AL, R_EAX );
1151 store_spreg( R_EAX, R_T );
1152 }
1153 break;
1154 case 0x5:
1155 { /* DMULU.L Rm, Rn */
1156 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1157 load_reg( R_EAX, Rm );
1158 load_reg( R_ECX, Rn );
1159 MUL_r32(R_ECX);
1160 store_spreg( R_EDX, R_MACH );
1161 store_spreg( R_EAX, R_MACL );
1162 }
1163 break;
1164 case 0x6:
1165 { /* CMP/HI Rm, Rn */
1166 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1167 load_reg( R_EAX, Rm );
1168 load_reg( R_ECX, Rn );
1169 CMP_r32_r32( R_EAX, R_ECX );
1170 SETA_t();
1171 }
1172 break;
1173 case 0x7:
1174 { /* CMP/GT Rm, Rn */
1175 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1176 load_reg( R_EAX, Rm );
1177 load_reg( R_ECX, Rn );
1178 CMP_r32_r32( R_EAX, R_ECX );
1179 SETG_t();
1180 }
1181 break;
1182 case 0x8:
1183 { /* SUB Rm, Rn */
1184 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1185 load_reg( R_EAX, Rm );
1186 load_reg( R_ECX, Rn );
1187 SUB_r32_r32( R_EAX, R_ECX );
1188 store_reg( R_ECX, Rn );
1189 }
1190 break;
1191 case 0xA:
1192 { /* SUBC Rm, Rn */
1193 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1194 load_reg( R_EAX, Rm );
1195 load_reg( R_ECX, Rn );
1196 LDC_t();
1197 SBB_r32_r32( R_EAX, R_ECX );
1198 store_reg( R_ECX, Rn );
1199 }
1200 break;
1201 case 0xB:
1202 { /* SUBV Rm, Rn */
1203 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1204 load_reg( R_EAX, Rm );
1205 load_reg( R_ECX, Rn );
1206 SUB_r32_r32( R_EAX, R_ECX );
1207 store_reg( R_ECX, Rn );
1208 SETO_t();
1209 }
1210 break;
1211 case 0xC:
1212 { /* ADD Rm, Rn */
1213 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1214 load_reg( R_EAX, Rm );
1215 load_reg( R_ECX, Rn );
1216 ADD_r32_r32( R_EAX, R_ECX );
1217 store_reg( R_ECX, Rn );
1218 }
1219 break;
1220 case 0xD:
1221 { /* DMULS.L Rm, Rn */
1222 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1223 load_reg( R_EAX, Rm );
1224 load_reg( R_ECX, Rn );
1225 IMUL_r32(R_ECX);
1226 store_spreg( R_EDX, R_MACH );
1227 store_spreg( R_EAX, R_MACL );
1228 }
1229 break;
1230 case 0xE:
1231 { /* ADDC Rm, Rn */
1232 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1233 load_reg( R_EAX, Rm );
1234 load_reg( R_ECX, Rn );
1235 LDC_t();
1236 ADC_r32_r32( R_EAX, R_ECX );
1237 store_reg( R_ECX, Rn );
1238 SETC_t();
1239 }
1240 break;
1241 case 0xF:
1242 { /* ADDV Rm, Rn */
1243 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1244 load_reg( R_EAX, Rm );
1245 load_reg( R_ECX, Rn );
1246 ADD_r32_r32( R_EAX, R_ECX );
1247 store_reg( R_ECX, Rn );
1248 SETO_t();
1249 }
1250 break;
1251 default:
1252 UNDEF();
1253 break;
1254 }
1255 break;
1256 case 0x4:
1257 switch( ir&0xF ) {
1258 case 0x0:
1259 switch( (ir&0xF0) >> 4 ) {
1260 case 0x0:
1261 { /* SHLL Rn */
1262 uint32_t Rn = ((ir>>8)&0xF);
1263 load_reg( R_EAX, Rn );
1264 SHL1_r32( R_EAX );
1265 store_reg( R_EAX, Rn );
1266 }
1267 break;
1268 case 0x1:
1269 { /* DT Rn */
1270 uint32_t Rn = ((ir>>8)&0xF);
1271 load_reg( R_EAX, Rn );
1272 ADD_imm8s_r32( -1, R_EAX );
1273 store_reg( R_EAX, Rn );
1274 SETE_t();
1275 }
1276 break;
1277 case 0x2:
1278 { /* SHAL Rn */
1279 uint32_t Rn = ((ir>>8)&0xF);
1280 load_reg( R_EAX, Rn );
1281 SHL1_r32( R_EAX );
1282 store_reg( R_EAX, Rn );
1283 }
1284 break;
1285 default:
1286 UNDEF();
1287 break;
1288 }
1289 break;
1290 case 0x1:
1291 switch( (ir&0xF0) >> 4 ) {
1292 case 0x0:
1293 { /* SHLR Rn */
1294 uint32_t Rn = ((ir>>8)&0xF);
1295 load_reg( R_EAX, Rn );
1296 SHR1_r32( R_EAX );
1297 store_reg( R_EAX, Rn );
1298 }
1299 break;
1300 case 0x1:
1301 { /* CMP/PZ Rn */
1302 uint32_t Rn = ((ir>>8)&0xF);
1303 load_reg( R_EAX, Rn );
1304 CMP_imm8s_r32( 0, R_EAX );
1305 SETGE_t();
1306 }
1307 break;
1308 case 0x2:
1309 { /* SHAR Rn */
1310 uint32_t Rn = ((ir>>8)&0xF);
1311 load_reg( R_EAX, Rn );
1312 SAR1_r32( R_EAX );
1313 store_reg( R_EAX, Rn );
1314 }
1315 break;
1316 default:
1317 UNDEF();
1318 break;
1319 }
1320 break;
1321 case 0x2:
1322 switch( (ir&0xF0) >> 4 ) {
1323 case 0x0:
1324 { /* STS.L MACH, @-Rn */
1325 uint32_t Rn = ((ir>>8)&0xF);
1326 load_reg( R_ECX, Rn );
1327 ADD_imm8s_r32( -4, R_ECX );
1328 store_reg( R_ECX, Rn );
1329 load_spreg( R_EAX, R_MACH );
1330 MEM_WRITE_LONG( R_ECX, R_EAX );
1331 }
1332 break;
1333 case 0x1:
1334 { /* STS.L MACL, @-Rn */
1335 uint32_t Rn = ((ir>>8)&0xF);
1336 load_reg( R_ECX, Rn );
1337 ADD_imm8s_r32( -4, R_ECX );
1338 store_reg( R_ECX, Rn );
1339 load_spreg( R_EAX, R_MACL );
1340 MEM_WRITE_LONG( R_ECX, R_EAX );
1341 }
1342 break;
1343 case 0x2:
1344 { /* STS.L PR, @-Rn */
1345 uint32_t Rn = ((ir>>8)&0xF);
1346 load_reg( R_ECX, Rn );
1347 ADD_imm8s_r32( -4, R_ECX );
1348 store_reg( R_ECX, Rn );
1349 load_spreg( R_EAX, R_PR );
1350 MEM_WRITE_LONG( R_ECX, R_EAX );
1351 }
1352 break;
1353 case 0x3:
1354 { /* STC.L SGR, @-Rn */
1355 uint32_t Rn = ((ir>>8)&0xF);
1356 check_priv();
1357 load_reg( R_ECX, Rn );
1358 ADD_imm8s_r32( -4, R_ECX );
1359 store_reg( R_ECX, Rn );
1360 load_spreg( R_EAX, R_SGR );
1361 MEM_WRITE_LONG( R_ECX, R_EAX );
1362 }
1363 break;
1364 case 0x5:
1365 { /* STS.L FPUL, @-Rn */
1366 uint32_t Rn = ((ir>>8)&0xF);
1367 load_reg( R_ECX, Rn );
1368 ADD_imm8s_r32( -4, R_ECX );
1369 store_reg( R_ECX, Rn );
1370 load_spreg( R_EAX, R_FPUL );
1371 MEM_WRITE_LONG( R_ECX, R_EAX );
1372 }
1373 break;
1374 case 0x6:
1375 { /* STS.L FPSCR, @-Rn */
1376 uint32_t Rn = ((ir>>8)&0xF);
1377 load_reg( R_ECX, Rn );
1378 ADD_imm8s_r32( -4, R_ECX );
1379 store_reg( R_ECX, Rn );
1380 load_spreg( R_EAX, R_FPSCR );
1381 MEM_WRITE_LONG( R_ECX, R_EAX );
1382 }
1383 break;
1384 case 0xF:
1385 { /* STC.L DBR, @-Rn */
1386 uint32_t Rn = ((ir>>8)&0xF);
1387 check_priv();
1388 load_reg( R_ECX, Rn );
1389 ADD_imm8s_r32( -4, R_ECX );
1390 store_reg( R_ECX, Rn );
1391 load_spreg( R_EAX, R_DBR );
1392 MEM_WRITE_LONG( R_ECX, R_EAX );
1393 }
1394 break;
1395 default:
1396 UNDEF();
1397 break;
1398 }
1399 break;
1400 case 0x3:
1401 switch( (ir&0x80) >> 7 ) {
1402 case 0x0:
1403 switch( (ir&0x70) >> 4 ) {
1404 case 0x0:
1405 { /* STC.L SR, @-Rn */
1406 uint32_t Rn = ((ir>>8)&0xF);
1407 check_priv();
1408 load_reg( R_ECX, Rn );
1409 ADD_imm8s_r32( -4, R_ECX );
1410 store_reg( R_ECX, Rn );
1411 call_func0( sh4_read_sr );
1412 MEM_WRITE_LONG( R_ECX, R_EAX );
1413 }
1414 break;
1415 case 0x1:
1416 { /* STC.L GBR, @-Rn */
1417 uint32_t Rn = ((ir>>8)&0xF);
1418 load_reg( R_ECX, Rn );
1419 ADD_imm8s_r32( -4, R_ECX );
1420 store_reg( R_ECX, Rn );
1421 load_spreg( R_EAX, R_GBR );
1422 MEM_WRITE_LONG( R_ECX, R_EAX );
1423 }
1424 break;
1425 case 0x2:
1426 { /* STC.L VBR, @-Rn */
1427 uint32_t Rn = ((ir>>8)&0xF);
1428 check_priv();
1429 load_reg( R_ECX, Rn );
1430 ADD_imm8s_r32( -4, R_ECX );
1431 store_reg( R_ECX, Rn );
1432 load_spreg( R_EAX, R_VBR );
1433 MEM_WRITE_LONG( R_ECX, R_EAX );
1434 }
1435 break;
1436 case 0x3:
1437 { /* STC.L SSR, @-Rn */
1438 uint32_t Rn = ((ir>>8)&0xF);
1439 check_priv();
1440 load_reg( R_ECX, Rn );
1441 ADD_imm8s_r32( -4, R_ECX );
1442 store_reg( R_ECX, Rn );
1443 load_spreg( R_EAX, R_SSR );
1444 MEM_WRITE_LONG( R_ECX, R_EAX );
1445 }
1446 break;
1447 case 0x4:
1448 { /* STC.L SPC, @-Rn */
1449 uint32_t Rn = ((ir>>8)&0xF);
1450 check_priv();
1451 load_reg( R_ECX, Rn );
1452 ADD_imm8s_r32( -4, R_ECX );
1453 store_reg( R_ECX, Rn );
1454 load_spreg( R_EAX, R_SPC );
1455 MEM_WRITE_LONG( R_ECX, R_EAX );
1456 }
1457 break;
1458 default:
1459 UNDEF();
1460 break;
1461 }
1462 break;
1463 case 0x1:
1464 { /* STC.L Rm_BANK, @-Rn */
1465 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm_BANK = ((ir>>4)&0x7);
1466 check_priv();
1467 load_reg( R_ECX, Rn );
1468 ADD_imm8s_r32( -4, R_ECX );
1469 store_reg( R_ECX, Rn );
1470 load_spreg( R_EAX, REG_OFFSET(r_bank[Rm_BANK]) );
1471 MEM_WRITE_LONG( R_ECX, R_EAX );
1472 }
1473 break;
1474 }
1475 break;
1476 case 0x4:
1477 switch( (ir&0xF0) >> 4 ) {
1478 case 0x0:
1479 { /* ROTL Rn */
1480 uint32_t Rn = ((ir>>8)&0xF);
1481 load_reg( R_EAX, Rn );
1482 ROL1_r32( R_EAX );
1483 store_reg( R_EAX, Rn );
1484 SETC_t();
1485 }
1486 break;
1487 case 0x2:
1488 { /* ROTCL Rn */
1489 uint32_t Rn = ((ir>>8)&0xF);
1490 load_reg( R_EAX, Rn );
1491 LDC_t();
1492 RCL1_r32( R_EAX );
1493 store_reg( R_EAX, Rn );
1494 SETC_t();
1495 }
1496 break;
1497 default:
1498 UNDEF();
1499 break;
1500 }
1501 break;
1502 case 0x5:
1503 switch( (ir&0xF0) >> 4 ) {
1504 case 0x0:
1505 { /* ROTR Rn */
1506 uint32_t Rn = ((ir>>8)&0xF);
1507 load_reg( R_EAX, Rn );
1508 ROR1_r32( R_EAX );
1509 store_reg( R_EAX, Rn );
1510 SETC_t();
1511 }
1512 break;
1513 case 0x1:
1514 { /* CMP/PL Rn */
1515 uint32_t Rn = ((ir>>8)&0xF);
1516 load_reg( R_EAX, Rn );
1517 CMP_imm8s_r32( 0, R_EAX );
1518 SETG_t();
1519 }
1520 break;
1521 case 0x2:
1522 { /* ROTCR Rn */
1523 uint32_t Rn = ((ir>>8)&0xF);
1524 load_reg( R_EAX, Rn );
1525 LDC_t();
1526 RCR1_r32( R_EAX );
1527 store_reg( R_EAX, Rn );
1528 SETC_t();
1529 }
1530 break;
1531 default:
1532 UNDEF();
1533 break;
1534 }
1535 break;
1536 case 0x6:
1537 switch( (ir&0xF0) >> 4 ) {
1538 case 0x0:
1539 { /* LDS.L @Rm+, MACH */
1540 uint32_t Rm = ((ir>>8)&0xF);
1541 load_reg( R_EAX, Rm );
1542 MOV_r32_r32( R_EAX, R_ECX );
1543 ADD_imm8s_r32( 4, R_EAX );
1544 store_reg( R_EAX, Rm );
1545 MEM_READ_LONG( R_ECX, R_EAX );
1546 store_spreg( R_EAX, R_MACH );
1547 }
1548 break;
1549 case 0x1:
1550 { /* LDS.L @Rm+, MACL */
1551 uint32_t Rm = ((ir>>8)&0xF);
1552 load_reg( R_EAX, Rm );
1553 MOV_r32_r32( R_EAX, R_ECX );
1554 ADD_imm8s_r32( 4, R_EAX );
1555 store_reg( R_EAX, Rm );
1556 MEM_READ_LONG( R_ECX, R_EAX );
1557 store_spreg( R_EAX, R_MACL );
1558 }
1559 break;
1560 case 0x2:
1561 { /* LDS.L @Rm+, PR */
1562 uint32_t Rm = ((ir>>8)&0xF);
1563 load_reg( R_EAX, Rm );
1564 MOV_r32_r32( R_EAX, R_ECX );
1565 ADD_imm8s_r32( 4, R_EAX );
1566 store_reg( R_EAX, Rm );
1567 MEM_READ_LONG( R_ECX, R_EAX );
1568 store_spreg( R_EAX, R_PR );
1569 }
1570 break;
1571 case 0x3:
1572 { /* LDC.L @Rm+, SGR */
1573 uint32_t Rm = ((ir>>8)&0xF);
1574 check_priv();
1575 load_reg( R_EAX, Rm );
1576 MOV_r32_r32( R_EAX, R_ECX );
1577 ADD_imm8s_r32( 4, R_EAX );
1578 store_reg( R_EAX, Rm );
1579 MEM_READ_LONG( R_ECX, R_EAX );
1580 store_spreg( R_EAX, R_SGR );
1581 }
1582 break;
1583 case 0x5:
1584 { /* LDS.L @Rm+, FPUL */
1585 uint32_t Rm = ((ir>>8)&0xF);
1586 load_reg( R_EAX, Rm );
1587 MOV_r32_r32( R_EAX, R_ECX );
1588 ADD_imm8s_r32( 4, R_EAX );
1589 store_reg( R_EAX, Rm );
1590 MEM_READ_LONG( R_ECX, R_EAX );
1591 store_spreg( R_EAX, R_FPUL );
1592 }
1593 break;
1594 case 0x6:
1595 { /* LDS.L @Rm+, FPSCR */
1596 uint32_t Rm = ((ir>>8)&0xF);
1597 load_reg( R_EAX, Rm );
1598 MOV_r32_r32( R_EAX, R_ECX );
1599 ADD_imm8s_r32( 4, R_EAX );
1600 store_reg( R_EAX, Rm );
1601 MEM_READ_LONG( R_ECX, R_EAX );
1602 store_spreg( R_EAX, R_FPSCR );
1603 update_fr_bank( R_EAX );
1604 }
1605 break;
1606 case 0xF:
1607 { /* LDC.L @Rm+, DBR */
1608 uint32_t Rm = ((ir>>8)&0xF);
1609 check_priv();
1610 load_reg( R_EAX, Rm );
1611 MOV_r32_r32( R_EAX, R_ECX );
1612 ADD_imm8s_r32( 4, R_EAX );
1613 store_reg( R_EAX, Rm );
1614 MEM_READ_LONG( R_ECX, R_EAX );
1615 store_spreg( R_EAX, R_DBR );
1616 }
1617 break;
1618 default:
1619 UNDEF();
1620 break;
1621 }
1622 break;
1623 case 0x7:
1624 switch( (ir&0x80) >> 7 ) {
1625 case 0x0:
1626 switch( (ir&0x70) >> 4 ) {
1627 case 0x0:
1628 { /* LDC.L @Rm+, SR */
1629 uint32_t Rm = ((ir>>8)&0xF);
1630 if( sh4_x86.in_delay_slot ) {
1631 SLOTILLEGAL();
1632 } else {
1633 check_priv();
1634 load_reg( R_EAX, Rm );
1635 MOV_r32_r32( R_EAX, R_ECX );
1636 ADD_imm8s_r32( 4, R_EAX );
1637 store_reg( R_EAX, Rm );
1638 MEM_READ_LONG( R_ECX, R_EAX );
1639 call_func1( sh4_write_sr, R_EAX );
1640 sh4_x86.priv_checked = FALSE;
1641 sh4_x86.fpuen_checked = FALSE;
1642 }
1643 }
1644 break;
1645 case 0x1:
1646 { /* LDC.L @Rm+, GBR */
1647 uint32_t Rm = ((ir>>8)&0xF);
1648 load_reg( R_EAX, Rm );
1649 MOV_r32_r32( R_EAX, R_ECX );
1650 ADD_imm8s_r32( 4, R_EAX );
1651 store_reg( R_EAX, Rm );
1652 MEM_READ_LONG( R_ECX, R_EAX );
1653 store_spreg( R_EAX, R_GBR );
1654 }
1655 break;
1656 case 0x2:
1657 { /* LDC.L @Rm+, VBR */
1658 uint32_t Rm = ((ir>>8)&0xF);
1659 check_priv();
1660 load_reg( R_EAX, Rm );
1661 MOV_r32_r32( R_EAX, R_ECX );
1662 ADD_imm8s_r32( 4, R_EAX );
1663 store_reg( R_EAX, Rm );
1664 MEM_READ_LONG( R_ECX, R_EAX );
1665 store_spreg( R_EAX, R_VBR );
1666 }
1667 break;
1668 case 0x3:
1669 { /* LDC.L @Rm+, SSR */
1670 uint32_t Rm = ((ir>>8)&0xF);
1671 check_priv();
1672 load_reg( R_EAX, Rm );
1673 MOV_r32_r32( R_EAX, R_ECX );
1674 ADD_imm8s_r32( 4, R_EAX );
1675 store_reg( R_EAX, Rm );
1676 MEM_READ_LONG( R_ECX, R_EAX );
1677 store_spreg( R_EAX, R_SSR );
1678 }
1679 break;
1680 case 0x4:
1681 { /* LDC.L @Rm+, SPC */
1682 uint32_t Rm = ((ir>>8)&0xF);
1683 check_priv();
1684 load_reg( R_EAX, Rm );
1685 MOV_r32_r32( R_EAX, R_ECX );
1686 ADD_imm8s_r32( 4, R_EAX );
1687 store_reg( R_EAX, Rm );
1688 MEM_READ_LONG( R_ECX, R_EAX );
1689 store_spreg( R_EAX, R_SPC );
1690 }
1691 break;
1692 default:
1693 UNDEF();
1694 break;
1695 }
1696 break;
1697 case 0x1:
1698 { /* LDC.L @Rm+, Rn_BANK */
1699 uint32_t Rm = ((ir>>8)&0xF); uint32_t Rn_BANK = ((ir>>4)&0x7);
1700 check_priv();
1701 load_reg( R_EAX, Rm );
1702 MOV_r32_r32( R_EAX, R_ECX );
1703 ADD_imm8s_r32( 4, R_EAX );
1704 store_reg( R_EAX, Rm );
1705 MEM_READ_LONG( R_ECX, R_EAX );
1706 store_spreg( R_EAX, REG_OFFSET(r_bank[Rn_BANK]) );
1707 }
1708 break;
1709 }
1710 break;
1711 case 0x8:
1712 switch( (ir&0xF0) >> 4 ) {
1713 case 0x0:
1714 { /* SHLL2 Rn */
1715 uint32_t Rn = ((ir>>8)&0xF);
1716 load_reg( R_EAX, Rn );
1717 SHL_imm8_r32( 2, R_EAX );
1718 store_reg( R_EAX, Rn );
1719 }
1720 break;
1721 case 0x1:
1722 { /* SHLL8 Rn */
1723 uint32_t Rn = ((ir>>8)&0xF);
1724 load_reg( R_EAX, Rn );
1725 SHL_imm8_r32( 8, R_EAX );
1726 store_reg( R_EAX, Rn );
1727 }
1728 break;
1729 case 0x2:
1730 { /* SHLL16 Rn */
1731 uint32_t Rn = ((ir>>8)&0xF);
1732 load_reg( R_EAX, Rn );
1733 SHL_imm8_r32( 16, R_EAX );
1734 store_reg( R_EAX, Rn );
1735 }
1736 break;
1737 default:
1738 UNDEF();
1739 break;
1740 }
1741 break;
1742 case 0x9:
1743 switch( (ir&0xF0) >> 4 ) {
1744 case 0x0:
1745 { /* SHLR2 Rn */
1746 uint32_t Rn = ((ir>>8)&0xF);
1747 load_reg( R_EAX, Rn );
1748 SHR_imm8_r32( 2, R_EAX );
1749 store_reg( R_EAX, Rn );
1750 }
1751 break;
1752 case 0x1:
1753 { /* SHLR8 Rn */
1754 uint32_t Rn = ((ir>>8)&0xF);
1755 load_reg( R_EAX, Rn );
1756 SHR_imm8_r32( 8, R_EAX );
1757 store_reg( R_EAX, Rn );
1758 }
1759 break;
1760 case 0x2:
1761 { /* SHLR16 Rn */
1762 uint32_t Rn = ((ir>>8)&0xF);
1763 load_reg( R_EAX, Rn );
1764 SHR_imm8_r32( 16, R_EAX );
1765 store_reg( R_EAX, Rn );
1766 }
1767 break;
1768 default:
1769 UNDEF();
1770 break;
1771 }
1772 break;
1773 case 0xA:
1774 switch( (ir&0xF0) >> 4 ) {
1775 case 0x0:
1776 { /* LDS Rm, MACH */
1777 uint32_t Rm = ((ir>>8)&0xF);
1778 load_reg( R_EAX, Rm );
1779 store_spreg( R_EAX, R_MACH );
1780 }
1781 break;
1782 case 0x1:
1783 { /* LDS Rm, MACL */
1784 uint32_t Rm = ((ir>>8)&0xF);
1785 load_reg( R_EAX, Rm );
1786 store_spreg( R_EAX, R_MACL );
1787 }
1788 break;
1789 case 0x2:
1790 { /* LDS Rm, PR */
1791 uint32_t Rm = ((ir>>8)&0xF);
1792 load_reg( R_EAX, Rm );
1793 store_spreg( R_EAX, R_PR );
1794 }
1795 break;
1796 case 0x3:
1797 { /* LDC Rm, SGR */
1798 uint32_t Rm = ((ir>>8)&0xF);
1799 check_priv();
1800 load_reg( R_EAX, Rm );
1801 store_spreg( R_EAX, R_SGR );
1802 }
1803 break;
1804 case 0x5:
1805 { /* LDS Rm, FPUL */
1806 uint32_t Rm = ((ir>>8)&0xF);
1807 load_reg( R_EAX, Rm );
1808 store_spreg( R_EAX, R_FPUL );
1809 }
1810 break;
1811 case 0x6:
1812 { /* LDS Rm, FPSCR */
1813 uint32_t Rm = ((ir>>8)&0xF);
1814 load_reg( R_EAX, Rm );
1815 store_spreg( R_EAX, R_FPSCR );
1816 update_fr_bank( R_EAX );
1817 }
1818 break;
1819 case 0xF:
1820 { /* LDC Rm, DBR */
1821 uint32_t Rm = ((ir>>8)&0xF);
1822 check_priv();
1823 load_reg( R_EAX, Rm );
1824 store_spreg( R_EAX, R_DBR );
1825 }
1826 break;
1827 default:
1828 UNDEF();
1829 break;
1830 }
1831 break;
1832 case 0xB:
1833 switch( (ir&0xF0) >> 4 ) {
1834 case 0x0:
1835 { /* JSR @Rn */
1836 uint32_t Rn = ((ir>>8)&0xF);
1837 if( sh4_x86.in_delay_slot ) {
1838 SLOTILLEGAL();
1839 } else {
1840 load_imm32( R_EAX, pc + 4 );
1841 store_spreg( R_EAX, R_PR );
1842 load_reg( R_EDI, Rn );
1843 sh4_x86.in_delay_slot = TRUE;
1844 return 0;
1845 }
1846 }
1847 break;
1848 case 0x1:
1849 { /* TAS.B @Rn */
1850 uint32_t Rn = ((ir>>8)&0xF);
1851 load_reg( R_ECX, Rn );
1852 MEM_READ_BYTE( R_ECX, R_EAX );
1853 TEST_r8_r8( R_AL, R_AL );
1854 SETE_t();
1855 OR_imm8_r8( 0x80, R_AL );
1856 load_reg( R_ECX, Rn );
1857 MEM_WRITE_BYTE( R_ECX, R_EAX );
1858 }
1859 break;
1860 case 0x2:
1861 { /* JMP @Rn */
1862 uint32_t Rn = ((ir>>8)&0xF);
1863 if( sh4_x86.in_delay_slot ) {
1864 SLOTILLEGAL();
1865 } else {
1866 load_reg( R_EDI, Rn );
1867 sh4_x86.in_delay_slot = TRUE;
1868 return 0;
1869 }
1870 }
1871 break;
1872 default:
1873 UNDEF();
1874 break;
1875 }
1876 break;
1877 case 0xC:
1878 { /* SHAD Rm, Rn */
1879 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1880 /* Annoyingly enough, not directly convertible */
1881 load_reg( R_EAX, Rn );
1882 load_reg( R_ECX, Rm );
1883 CMP_imm32_r32( 0, R_ECX );
1884 JGE_rel8(16, doshl);
1886 NEG_r32( R_ECX ); // 2
1887 AND_imm8_r8( 0x1F, R_CL ); // 3
1888 JE_rel8( 4, emptysar); // 2
1889 SAR_r32_CL( R_EAX ); // 2
1890 JMP_rel8(10, end); // 2
1892 JMP_TARGET(emptysar);
1893 SAR_imm8_r32(31, R_EAX ); // 3
1894 JMP_rel8(5, end2);
1896 JMP_TARGET(doshl);
1897 AND_imm8_r8( 0x1F, R_CL ); // 3
1898 SHL_r32_CL( R_EAX ); // 2
1899 JMP_TARGET(end);
1900 JMP_TARGET(end2);
1901 store_reg( R_EAX, Rn );
1902 }
1903 break;
1904 case 0xD:
1905 { /* SHLD Rm, Rn */
1906 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1907 load_reg( R_EAX, Rn );
1908 load_reg( R_ECX, Rm );
1909 CMP_imm32_r32( 0, R_ECX );
1910 JGE_rel8(15, doshl);
1912 NEG_r32( R_ECX ); // 2
1913 AND_imm8_r8( 0x1F, R_CL ); // 3
1914 JE_rel8( 4, emptyshr );
1915 SHR_r32_CL( R_EAX ); // 2
1916 JMP_rel8(9, end); // 2
1918 JMP_TARGET(emptyshr);
1919 XOR_r32_r32( R_EAX, R_EAX );
1920 JMP_rel8(5, end2);
1922 JMP_TARGET(doshl);
1923 AND_imm8_r8( 0x1F, R_CL ); // 3
1924 SHL_r32_CL( R_EAX ); // 2
1925 JMP_TARGET(end);
1926 JMP_TARGET(end2);
1927 store_reg( R_EAX, Rn );
1928 }
1929 break;
1930 case 0xE:
1931 switch( (ir&0x80) >> 7 ) {
1932 case 0x0:
1933 switch( (ir&0x70) >> 4 ) {
1934 case 0x0:
1935 { /* LDC Rm, SR */
1936 uint32_t Rm = ((ir>>8)&0xF);
1937 if( sh4_x86.in_delay_slot ) {
1938 SLOTILLEGAL();
1939 } else {
1940 check_priv();
1941 load_reg( R_EAX, Rm );
1942 call_func1( sh4_write_sr, R_EAX );
1943 sh4_x86.priv_checked = FALSE;
1944 sh4_x86.fpuen_checked = FALSE;
1945 }
1946 }
1947 break;
1948 case 0x1:
1949 { /* LDC Rm, GBR */
1950 uint32_t Rm = ((ir>>8)&0xF);
1951 load_reg( R_EAX, Rm );
1952 store_spreg( R_EAX, R_GBR );
1953 }
1954 break;
1955 case 0x2:
1956 { /* LDC Rm, VBR */
1957 uint32_t Rm = ((ir>>8)&0xF);
1958 check_priv();
1959 load_reg( R_EAX, Rm );
1960 store_spreg( R_EAX, R_VBR );
1961 }
1962 break;
1963 case 0x3:
1964 { /* LDC Rm, SSR */
1965 uint32_t Rm = ((ir>>8)&0xF);
1966 check_priv();
1967 load_reg( R_EAX, Rm );
1968 store_spreg( R_EAX, R_SSR );
1969 }
1970 break;
1971 case 0x4:
1972 { /* LDC Rm, SPC */
1973 uint32_t Rm = ((ir>>8)&0xF);
1974 check_priv();
1975 load_reg( R_EAX, Rm );
1976 store_spreg( R_EAX, R_SPC );
1977 }
1978 break;
1979 default:
1980 UNDEF();
1981 break;
1982 }
1983 break;
1984 case 0x1:
1985 { /* LDC Rm, Rn_BANK */
1986 uint32_t Rm = ((ir>>8)&0xF); uint32_t Rn_BANK = ((ir>>4)&0x7);
1987 check_priv();
1988 load_reg( R_EAX, Rm );
1989 store_spreg( R_EAX, REG_OFFSET(r_bank[Rn_BANK]) );
1990 }
1991 break;
1992 }
1993 break;
1994 case 0xF:
1995 { /* MAC.W @Rm+, @Rn+ */
1996 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1997 load_reg( R_ECX, Rm );
1998 check_ralign16( R_ECX );
1999 load_reg( R_ECX, Rn );
2000 check_ralign16( R_ECX );
2001 ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rn]) );
2002 MEM_READ_WORD( R_ECX, R_EAX );
2003 PUSH_r32( R_EAX );
2004 load_reg( R_ECX, Rm );
2005 ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
2006 MEM_READ_WORD( R_ECX, R_EAX );
2007 POP_r32( R_ECX );
2008 IMUL_r32( R_ECX );
2010 load_spreg( R_ECX, R_S );
2011 TEST_r32_r32( R_ECX, R_ECX );
2012 JE_rel8( 47, nosat );
2014 ADD_r32_sh4r( R_EAX, R_MACL ); // 6
2015 JNO_rel8( 51, end ); // 2
2016 load_imm32( R_EDX, 1 ); // 5
2017 store_spreg( R_EDX, R_MACH ); // 6
2018 JS_rel8( 13, positive ); // 2
2019 load_imm32( R_EAX, 0x80000000 );// 5
2020 store_spreg( R_EAX, R_MACL ); // 6
2021 JMP_rel8( 25, end2 ); // 2
2023 JMP_TARGET(positive);
2024 load_imm32( R_EAX, 0x7FFFFFFF );// 5
2025 store_spreg( R_EAX, R_MACL ); // 6
2026 JMP_rel8( 12, end3); // 2
2028 JMP_TARGET(nosat);
2029 ADD_r32_sh4r( R_EAX, R_MACL ); // 6
2030 ADC_r32_sh4r( R_EDX, R_MACH ); // 6
2031 JMP_TARGET(end);
2032 JMP_TARGET(end2);
2033 JMP_TARGET(end3);
2034 }
2035 break;
2036 }
2037 break;
2038 case 0x5:
2039 { /* MOV.L @(disp, Rm), Rn */
2040 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<2;
2041 load_reg( R_ECX, Rm );
2042 ADD_imm8s_r32( disp, R_ECX );
2043 check_ralign32( R_ECX );
2044 MEM_READ_LONG( R_ECX, R_EAX );
2045 store_reg( R_EAX, Rn );
2046 }
2047 break;
2048 case 0x6:
2049 switch( ir&0xF ) {
2050 case 0x0:
2051 { /* MOV.B @Rm, Rn */
2052 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2053 load_reg( R_ECX, Rm );
2054 MEM_READ_BYTE( R_ECX, R_EAX );
2055 store_reg( R_EAX, Rn );
2056 }
2057 break;
2058 case 0x1:
2059 { /* MOV.W @Rm, Rn */
2060 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2061 load_reg( R_ECX, Rm );
2062 check_ralign16( R_ECX );
2063 MEM_READ_WORD( R_ECX, R_EAX );
2064 store_reg( R_EAX, Rn );
2065 }
2066 break;
2067 case 0x2:
2068 { /* MOV.L @Rm, Rn */
2069 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2070 load_reg( R_ECX, Rm );
2071 check_ralign32( R_ECX );
2072 MEM_READ_LONG( R_ECX, R_EAX );
2073 store_reg( R_EAX, Rn );
2074 }
2075 break;
2076 case 0x3:
2077 { /* MOV Rm, Rn */
2078 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2079 load_reg( R_EAX, Rm );
2080 store_reg( R_EAX, Rn );
2081 }
2082 break;
2083 case 0x4:
2084 { /* MOV.B @Rm+, Rn */
2085 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2086 load_reg( R_ECX, Rm );
2087 MOV_r32_r32( R_ECX, R_EAX );
2088 ADD_imm8s_r32( 1, R_EAX );
2089 store_reg( R_EAX, Rm );
2090 MEM_READ_BYTE( R_ECX, R_EAX );
2091 store_reg( R_EAX, Rn );
2092 }
2093 break;
2094 case 0x5:
2095 { /* MOV.W @Rm+, Rn */
2096 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2097 load_reg( R_EAX, Rm );
2098 check_ralign16( R_EAX );
2099 MOV_r32_r32( R_EAX, R_ECX );
2100 ADD_imm8s_r32( 2, R_EAX );
2101 store_reg( R_EAX, Rm );
2102 MEM_READ_WORD( R_ECX, R_EAX );
2103 store_reg( R_EAX, Rn );
2104 }
2105 break;
2106 case 0x6:
2107 { /* MOV.L @Rm+, Rn */
2108 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2109 load_reg( R_EAX, Rm );
2110 check_ralign32( R_EAX );
2111 MOV_r32_r32( R_EAX, R_ECX );
2112 ADD_imm8s_r32( 4, R_EAX );
2113 store_reg( R_EAX, Rm );
2114 MEM_READ_LONG( R_ECX, R_EAX );
2115 store_reg( R_EAX, Rn );
2116 }
2117 break;
2118 case 0x7:
2119 { /* NOT Rm, Rn */
2120 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2121 load_reg( R_EAX, Rm );
2122 NOT_r32( R_EAX );
2123 store_reg( R_EAX, Rn );
2124 }
2125 break;
2126 case 0x8:
2127 { /* SWAP.B Rm, Rn */
2128 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2129 load_reg( R_EAX, Rm );
2130 XCHG_r8_r8( R_AL, R_AH );
2131 store_reg( R_EAX, Rn );
2132 }
2133 break;
2134 case 0x9:
2135 { /* SWAP.W Rm, Rn */
2136 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2137 load_reg( R_EAX, Rm );
2138 MOV_r32_r32( R_EAX, R_ECX );
2139 SHL_imm8_r32( 16, R_ECX );
2140 SHR_imm8_r32( 16, R_EAX );
2141 OR_r32_r32( R_EAX, R_ECX );
2142 store_reg( R_ECX, Rn );
2143 }
2144 break;
2145 case 0xA:
2146 { /* NEGC Rm, Rn */
2147 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2148 load_reg( R_EAX, Rm );
2149 XOR_r32_r32( R_ECX, R_ECX );
2150 LDC_t();
2151 SBB_r32_r32( R_EAX, R_ECX );
2152 store_reg( R_ECX, Rn );
2153 SETC_t();
2154 }
2155 break;
2156 case 0xB:
2157 { /* NEG Rm, Rn */
2158 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2159 load_reg( R_EAX, Rm );
2160 NEG_r32( R_EAX );
2161 store_reg( R_EAX, Rn );
2162 }
2163 break;
2164 case 0xC:
2165 { /* EXTU.B Rm, Rn */
2166 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2167 load_reg( R_EAX, Rm );
2168 MOVZX_r8_r32( R_EAX, R_EAX );
2169 store_reg( R_EAX, Rn );
2170 }
2171 break;
2172 case 0xD:
2173 { /* EXTU.W Rm, Rn */
2174 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2175 load_reg( R_EAX, Rm );
2176 MOVZX_r16_r32( R_EAX, R_EAX );
2177 store_reg( R_EAX, Rn );
2178 }
2179 break;
2180 case 0xE:
2181 { /* EXTS.B Rm, Rn */
2182 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2183 load_reg( R_EAX, Rm );
2184 MOVSX_r8_r32( R_EAX, R_EAX );
2185 store_reg( R_EAX, Rn );
2186 }
2187 break;
2188 case 0xF:
2189 { /* EXTS.W Rm, Rn */
2190 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2191 load_reg( R_EAX, Rm );
2192 MOVSX_r16_r32( R_EAX, R_EAX );
2193 store_reg( R_EAX, Rn );
2194 }
2195 break;
2196 }
2197 break;
2198 case 0x7:
2199 { /* ADD #imm, Rn */
2200 uint32_t Rn = ((ir>>8)&0xF); int32_t imm = SIGNEXT8(ir&0xFF);
2201 load_reg( R_EAX, Rn );
2202 ADD_imm8s_r32( imm, R_EAX );
2203 store_reg( R_EAX, Rn );
2204 }
2205 break;
2206 case 0x8:
2207 switch( (ir&0xF00) >> 8 ) {
2208 case 0x0:
2209 { /* MOV.B R0, @(disp, Rn) */
2210 uint32_t Rn = ((ir>>4)&0xF); uint32_t disp = (ir&0xF);
2211 load_reg( R_EAX, 0 );
2212 load_reg( R_ECX, Rn );
2213 ADD_imm32_r32( disp, R_ECX );
2214 MEM_WRITE_BYTE( R_ECX, R_EAX );
2215 }
2216 break;
2217 case 0x1:
2218 { /* MOV.W R0, @(disp, Rn) */
2219 uint32_t Rn = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<1;
2220 load_reg( R_ECX, Rn );
2221 load_reg( R_EAX, 0 );
2222 ADD_imm32_r32( disp, R_ECX );
2223 check_walign16( R_ECX );
2224 MEM_WRITE_WORD( R_ECX, R_EAX );
2225 }
2226 break;
2227 case 0x4:
2228 { /* MOV.B @(disp, Rm), R0 */
2229 uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF);
2230 load_reg( R_ECX, Rm );
2231 ADD_imm32_r32( disp, R_ECX );
2232 MEM_READ_BYTE( R_ECX, R_EAX );
2233 store_reg( R_EAX, 0 );
2234 }
2235 break;
2236 case 0x5:
2237 { /* MOV.W @(disp, Rm), R0 */
2238 uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<1;
2239 load_reg( R_ECX, Rm );
2240 ADD_imm32_r32( disp, R_ECX );
2241 check_ralign16( R_ECX );
2242 MEM_READ_WORD( R_ECX, R_EAX );
2243 store_reg( R_EAX, 0 );
2244 }
2245 break;
2246 case 0x8:
2247 { /* CMP/EQ #imm, R0 */
2248 int32_t imm = SIGNEXT8(ir&0xFF);
2249 load_reg( R_EAX, 0 );
2250 CMP_imm8s_r32(imm, R_EAX);
2251 SETE_t();
2252 }
2253 break;
2254 case 0x9:
2255 { /* BT disp */
2256 int32_t disp = SIGNEXT8(ir&0xFF)<<1;
2257 if( sh4_x86.in_delay_slot ) {
2258 SLOTILLEGAL();
2259 } else {
2260 load_imm32( R_EDI, pc + 2 );
2261 CMP_imm8s_sh4r( 0, R_T );
2262 JE_rel8( 5, nottaken );
2263 load_imm32( R_EDI, disp + pc + 4 );
2264 JMP_TARGET(nottaken);
2265 INC_r32(R_ESI);
2266 return 1;
2267 }
2268 }
2269 break;
2270 case 0xB:
2271 { /* BF disp */
2272 int32_t disp = SIGNEXT8(ir&0xFF)<<1;
2273 if( sh4_x86.in_delay_slot ) {
2274 SLOTILLEGAL();
2275 } else {
2276 load_imm32( R_EDI, pc + 2 );
2277 CMP_imm8s_sh4r( 0, R_T );
2278 JNE_rel8( 5, nottaken );
2279 load_imm32( R_EDI, disp + pc + 4 );
2280 JMP_TARGET(nottaken);
2281 INC_r32(R_ESI);
2282 return 1;
2283 }
2284 }
2285 break;
2286 case 0xD:
2287 { /* BT/S disp */
2288 int32_t disp = SIGNEXT8(ir&0xFF)<<1;
2289 if( sh4_x86.in_delay_slot ) {
2290 SLOTILLEGAL();
2291 } else {
2292 load_imm32( R_EDI, pc + 4 );
2293 CMP_imm8s_sh4r( 0, R_T );
2294 JE_rel8( 5, nottaken );
2295 load_imm32( R_EDI, disp + pc + 4 );
2296 JMP_TARGET(nottaken);
2297 sh4_x86.in_delay_slot = TRUE;
2298 return 0;
2299 }
2300 }
2301 break;
2302 case 0xF:
2303 { /* BF/S disp */
2304 int32_t disp = SIGNEXT8(ir&0xFF)<<1;
2305 if( sh4_x86.in_delay_slot ) {
2306 SLOTILLEGAL();
2307 } else {
2308 load_imm32( R_EDI, pc + 4 );
2309 CMP_imm8s_sh4r( 0, R_T );
2310 JNE_rel8( 5, nottaken );
2311 load_imm32( R_EDI, disp + pc + 4 );
2312 JMP_TARGET(nottaken);
2313 sh4_x86.in_delay_slot = TRUE;
2314 return 0;
2315 }
2316 }
2317 break;
2318 default:
2319 UNDEF();
2320 break;
2321 }
2322 break;
2323 case 0x9:
2324 { /* MOV.W @(disp, PC), Rn */
2325 uint32_t Rn = ((ir>>8)&0xF); uint32_t disp = (ir&0xFF)<<1;
2326 if( sh4_x86.in_delay_slot ) {
2327 SLOTILLEGAL();
2328 } else {
2329 load_imm32( R_ECX, pc + disp + 4 );
2330 MEM_READ_WORD( R_ECX, R_EAX );
2331 store_reg( R_EAX, Rn );
2332 }
2333 }
2334 break;
2335 case 0xA:
2336 { /* BRA disp */
2337 int32_t disp = SIGNEXT12(ir&0xFFF)<<1;
2338 if( sh4_x86.in_delay_slot ) {
2339 SLOTILLEGAL();
2340 } else {
2341 load_imm32( R_EDI, disp + pc + 4 );
2342 sh4_x86.in_delay_slot = TRUE;
2343 return 0;
2344 }
2345 }
2346 break;
2347 case 0xB:
2348 { /* BSR disp */
2349 int32_t disp = SIGNEXT12(ir&0xFFF)<<1;
2350 if( sh4_x86.in_delay_slot ) {
2351 SLOTILLEGAL();
2352 } else {
2353 load_imm32( R_EAX, pc + 4 );
2354 store_spreg( R_EAX, R_PR );
2355 load_imm32( R_EDI, disp + pc + 4 );
2356 sh4_x86.in_delay_slot = TRUE;
2357 return 0;
2358 }
2359 }
2360 break;
2361 case 0xC:
2362 switch( (ir&0xF00) >> 8 ) {
2363 case 0x0:
2364 { /* MOV.B R0, @(disp, GBR) */
2365 uint32_t disp = (ir&0xFF);
2366 load_reg( R_EAX, 0 );
2367 load_spreg( R_ECX, R_GBR );
2368 ADD_imm32_r32( disp, R_ECX );
2369 MEM_WRITE_BYTE( R_ECX, R_EAX );
2370 }
2371 break;
2372 case 0x1:
2373 { /* MOV.W R0, @(disp, GBR) */
2374 uint32_t disp = (ir&0xFF)<<1;
2375 load_spreg( R_ECX, R_GBR );
2376 load_reg( R_EAX, 0 );
2377 ADD_imm32_r32( disp, R_ECX );
2378 check_walign16( R_ECX );
2379 MEM_WRITE_WORD( R_ECX, R_EAX );
2380 }
2381 break;
2382 case 0x2:
2383 { /* MOV.L R0, @(disp, GBR) */
2384 uint32_t disp = (ir&0xFF)<<2;
2385 load_spreg( R_ECX, R_GBR );
2386 load_reg( R_EAX, 0 );
2387 ADD_imm32_r32( disp, R_ECX );
2388 check_walign32( R_ECX );
2389 MEM_WRITE_LONG( R_ECX, R_EAX );
2390 }
2391 break;
2392 case 0x3:
2393 { /* TRAPA #imm */
2394 uint32_t imm = (ir&0xFF);
2395 if( sh4_x86.in_delay_slot ) {
2396 SLOTILLEGAL();
2397 } else {
2398 PUSH_imm32( imm );
2399 call_func0( sh4_raise_trap );
2400 ADD_imm8s_r32( 4, R_ESP );
2401 }
2402 }
2403 break;
2404 case 0x4:
2405 { /* MOV.B @(disp, GBR), R0 */
2406 uint32_t disp = (ir&0xFF);
2407 load_spreg( R_ECX, R_GBR );
2408 ADD_imm32_r32( disp, R_ECX );
2409 MEM_READ_BYTE( R_ECX, R_EAX );
2410 store_reg( R_EAX, 0 );
2411 }
2412 break;
2413 case 0x5:
2414 { /* MOV.W @(disp, GBR), R0 */
2415 uint32_t disp = (ir&0xFF)<<1;
2416 load_spreg( R_ECX, R_GBR );
2417 ADD_imm32_r32( disp, R_ECX );
2418 check_ralign16( R_ECX );
2419 MEM_READ_WORD( R_ECX, R_EAX );
2420 store_reg( R_EAX, 0 );
2421 }
2422 break;
2423 case 0x6:
2424 { /* MOV.L @(disp, GBR), R0 */
2425 uint32_t disp = (ir&0xFF)<<2;
2426 load_spreg( R_ECX, R_GBR );
2427 ADD_imm32_r32( disp, R_ECX );
2428 check_ralign32( R_ECX );
2429 MEM_READ_LONG( R_ECX, R_EAX );
2430 store_reg( R_EAX, 0 );
2431 }
2432 break;
2433 case 0x7:
2434 { /* MOVA @(disp, PC), R0 */
2435 uint32_t disp = (ir&0xFF)<<2;
2436 if( sh4_x86.in_delay_slot ) {
2437 SLOTILLEGAL();
2438 } else {
2439 load_imm32( R_ECX, (pc & 0xFFFFFFFC) + disp + 4 );
2440 store_reg( R_ECX, 0 );
2441 }
2442 }
2443 break;
2444 case 0x8:
2445 { /* TST #imm, R0 */
2446 uint32_t imm = (ir&0xFF);
2447 load_reg( R_EAX, 0 );
2448 TEST_imm32_r32( imm, R_EAX );
2449 SETE_t();
2450 }
2451 break;
2452 case 0x9:
2453 { /* AND #imm, R0 */
2454 uint32_t imm = (ir&0xFF);
2455 load_reg( R_EAX, 0 );
2456 AND_imm32_r32(imm, R_EAX);
2457 store_reg( R_EAX, 0 );
2458 }
2459 break;
2460 case 0xA:
2461 { /* XOR #imm, R0 */
2462 uint32_t imm = (ir&0xFF);
2463 load_reg( R_EAX, 0 );
2464 XOR_imm32_r32( imm, R_EAX );
2465 store_reg( R_EAX, 0 );
2466 }
2467 break;
2468 case 0xB:
2469 { /* OR #imm, R0 */
2470 uint32_t imm = (ir&0xFF);
2471 load_reg( R_EAX, 0 );
2472 OR_imm32_r32(imm, R_EAX);
2473 store_reg( R_EAX, 0 );
2474 }
2475 break;
2476 case 0xC:
2477 { /* TST.B #imm, @(R0, GBR) */
2478 uint32_t imm = (ir&0xFF);
2479 load_reg( R_EAX, 0);
2480 load_reg( R_ECX, R_GBR);
2481 ADD_r32_r32( R_EAX, R_ECX );
2482 MEM_READ_BYTE( R_ECX, R_EAX );
2483 TEST_imm8_r8( imm, R_EAX );
2484 SETE_t();
2485 }
2486 break;
2487 case 0xD:
2488 { /* AND.B #imm, @(R0, GBR) */
2489 uint32_t imm = (ir&0xFF);
2490 load_reg( R_EAX, 0 );
2491 load_spreg( R_ECX, R_GBR );
2492 ADD_r32_r32( R_EAX, R_ECX );
2493 PUSH_r32(R_ECX);
2494 call_func0(sh4_read_byte);
2495 POP_r32(R_ECX);
2496 AND_imm32_r32(imm, R_EAX );
2497 MEM_WRITE_BYTE( R_ECX, R_EAX );
2498 }
2499 break;
2500 case 0xE:
2501 { /* XOR.B #imm, @(R0, GBR) */
2502 uint32_t imm = (ir&0xFF);
2503 load_reg( R_EAX, 0 );
2504 load_spreg( R_ECX, R_GBR );
2505 ADD_r32_r32( R_EAX, R_ECX );
2506 PUSH_r32(R_ECX);
2507 call_func0(sh4_read_byte);
2508 POP_r32(R_ECX);
2509 XOR_imm32_r32( imm, R_EAX );
2510 MEM_WRITE_BYTE( R_ECX, R_EAX );
2511 }
2512 break;
2513 case 0xF:
2514 { /* OR.B #imm, @(R0, GBR) */
2515 uint32_t imm = (ir&0xFF);
2516 load_reg( R_EAX, 0 );
2517 load_spreg( R_ECX, R_GBR );
2518 ADD_r32_r32( R_EAX, R_ECX );
2519 PUSH_r32(R_ECX);
2520 call_func0(sh4_read_byte);
2521 POP_r32(R_ECX);
2522 OR_imm32_r32(imm, R_EAX );
2523 MEM_WRITE_BYTE( R_ECX, R_EAX );
2524 }
2525 break;
2526 }
2527 break;
    case 0xD:
        { /* MOV.L @(disp, PC), Rn */
        /* PC-relative literal-pool load. Illegal in a delay slot.  The
         * target address is fixed at translation time, so when the target
         * lies in a directly addressable host region we emit an absolute
         * host-memory load instead of going through the MMU path. */
        uint32_t Rn = ((ir>>8)&0xF); uint32_t disp = (ir&0xFF)<<2;
        if( sh4_x86.in_delay_slot ) {
    	SLOTILLEGAL();
        } else {
    	uint32_t target = (pc & 0xFFFFFFFC) + disp + 4;
    	char *ptr = mem_get_region(target);
    	if( ptr != NULL ) {
    	    /* NOTE(review): (uint32_t)ptr assumes a 32-bit host pointer;
    	     * this truncates on 64-bit hosts — confirm build target. */
    	    MOV_moff32_EAX( (uint32_t)ptr );
    	} else {
    	    load_imm32( R_ECX, target );
    	    MEM_READ_LONG( R_ECX, R_EAX );
    	}
    	store_reg( R_EAX, Rn );
        }
        }
        break;
    case 0xE:
        { /* MOV #imm, Rn */
        /* Load sign-extended 8-bit immediate into Rn. */
        uint32_t Rn = ((ir>>8)&0xF); int32_t imm = SIGNEXT8(ir&0xFF);
        load_imm32( R_EAX, imm );
        store_reg( R_EAX, Rn );
        }
        break;
2553 case 0xF:
2554 switch( ir&0xF ) {
            case 0x0:
                { /* FADD FRm, FRn */
                /* All FP arithmetic ops branch at runtime on FPSCR.PR:
                 * single precision (push_fr/pop_fr) vs double (push_dr/pop_dr)
                 * via the x87 stack.  The literal rel8 counts in JNE_rel8/
                 * JMP_rel8 are the byte sizes of the emitted code they skip;
                 * JMP_TARGET presumably asserts them in debug builds. */
                uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
                check_fpuen();
                load_spreg( R_ECX, R_FPSCR );
                TEST_imm32_r32( FPSCR_PR, R_ECX );
                load_fr_bank( R_EDX );
                JNE_rel8(13,doubleprec);
                push_fr(R_EDX, FRm);
                push_fr(R_EDX, FRn);
                FADDP_st(1);
                pop_fr(R_EDX, FRn);
                JMP_rel8(11,end);
                JMP_TARGET(doubleprec);
                push_dr(R_EDX, FRm);
                push_dr(R_EDX, FRn);
                FADDP_st(1);
                pop_dr(R_EDX, FRn);
                JMP_TARGET(end);
                }
                break;
            case 0x1:
                { /* FSUB FRm, FRn */
                /* Operands pushed FRn then FRm so FSUBP computes FRn - FRm. */
                uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
                check_fpuen();
                load_spreg( R_ECX, R_FPSCR );
                TEST_imm32_r32( FPSCR_PR, R_ECX );
                load_fr_bank( R_EDX );
                JNE_rel8(13, doubleprec);
                push_fr(R_EDX, FRn);
                push_fr(R_EDX, FRm);
                FSUBP_st(1);
                pop_fr(R_EDX, FRn);
                JMP_rel8(11, end);
                JMP_TARGET(doubleprec);
                push_dr(R_EDX, FRn);
                push_dr(R_EDX, FRm);
                FSUBP_st(1);
                pop_dr(R_EDX, FRn);
                JMP_TARGET(end);
                }
                break;
            case 0x2:
                { /* FMUL FRm, FRn */
                /* FRn = FRn * FRm (operand order irrelevant for multiply). */
                uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
                check_fpuen();
                load_spreg( R_ECX, R_FPSCR );
                TEST_imm32_r32( FPSCR_PR, R_ECX );
                load_fr_bank( R_EDX );
                JNE_rel8(13, doubleprec);
                push_fr(R_EDX, FRm);
                push_fr(R_EDX, FRn);
                FMULP_st(1);
                pop_fr(R_EDX, FRn);
                JMP_rel8(11, end);
                JMP_TARGET(doubleprec);
                push_dr(R_EDX, FRm);
                push_dr(R_EDX, FRn);
                FMULP_st(1);
                pop_dr(R_EDX, FRn);
                JMP_TARGET(end);
                }
                break;
            case 0x3:
                { /* FDIV FRm, FRn */
                /* Pushed FRn then FRm so FDIVP computes FRn / FRm. */
                uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
                check_fpuen();
                load_spreg( R_ECX, R_FPSCR );
                TEST_imm32_r32( FPSCR_PR, R_ECX );
                load_fr_bank( R_EDX );
                JNE_rel8(13, doubleprec);
                push_fr(R_EDX, FRn);
                push_fr(R_EDX, FRm);
                FDIVP_st(1);
                pop_fr(R_EDX, FRn);
                JMP_rel8(11, end);
                JMP_TARGET(doubleprec);
                push_dr(R_EDX, FRn);
                push_dr(R_EDX, FRm);
                FDIVP_st(1);
                pop_dr(R_EDX, FRn);
                JMP_TARGET(end);
                }
                break;
            case 0x4:
                { /* FCMP/EQ FRm, FRn */
                /* Both precision paths merely push the operands; the
                 * FCOMIP/SETcc/FPOP tail after the join point is shared.
                 * With st0=FRn, st1=FRm, SETE sets T iff FRn == FRm. */
                uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
                check_fpuen();
                load_spreg( R_ECX, R_FPSCR );
                TEST_imm32_r32( FPSCR_PR, R_ECX );
                load_fr_bank( R_EDX );
                JNE_rel8(8, doubleprec);
                push_fr(R_EDX, FRm);
                push_fr(R_EDX, FRn);
                JMP_rel8(6, end);
                JMP_TARGET(doubleprec);
                push_dr(R_EDX, FRm);
                push_dr(R_EDX, FRn);
                JMP_TARGET(end);
                FCOMIP_st(1);
                SETE_t();
                FPOP_st();
                }
                break;
            case 0x5:
                { /* FCMP/GT FRm, FRn */
                /* As FCMP/EQ, but SETA sets T iff FRn > FRm (unordered
                 * compare via FCOMIP, so NaN yields T=0). */
                uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
                check_fpuen();
                load_spreg( R_ECX, R_FPSCR );
                TEST_imm32_r32( FPSCR_PR, R_ECX );
                load_fr_bank( R_EDX );
                JNE_rel8(8, doubleprec);
                push_fr(R_EDX, FRm);
                push_fr(R_EDX, FRn);
                JMP_rel8(6, end);
                JMP_TARGET(doubleprec);
                push_dr(R_EDX, FRm);
                push_dr(R_EDX, FRn);
                JMP_TARGET(end);
                FCOMIP_st(1);
                SETA_t();
                FPOP_st();
                }
                break;
            case 0x6:
                { /* FMOV @(R0, Rm), FRn */
                /* FMOV memory forms branch at runtime on FPSCR.SZ:
                 * 32-bit single move vs 64-bit pair move.  The if(FRn&1)
                 * split is a translation-time choice of bank: an odd FRn in
                 * SZ=1 mode addresses the XD registers (xf_bank), an even
                 * one the front bank.  The pair is stored to FRn&0x0E /
                 * FRn|0x01 to keep the 64-bit halves in order. */
                uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
                check_fpuen();
                load_reg( R_EDX, Rm );
                ADD_sh4r_r32( REG_OFFSET(r[0]), R_EDX );
                check_ralign32( R_EDX );
                load_spreg( R_ECX, R_FPSCR );
                TEST_imm32_r32( FPSCR_SZ, R_ECX );
                JNE_rel8(19, doublesize);
                MEM_READ_LONG( R_EDX, R_EAX );
                load_fr_bank( R_ECX );
                store_fr( R_ECX, R_EAX, FRn );
                if( FRn&1 ) {
                    JMP_rel8(48, end);
                    JMP_TARGET(doublesize);
                    MEM_READ_DOUBLE( R_EDX, R_EAX, R_EDX );
                    load_spreg( R_ECX, R_FPSCR ); // assume read_long clobbered it
                    load_xf_bank( R_ECX );
                    store_fr( R_ECX, R_EAX, FRn&0x0E );
                    store_fr( R_ECX, R_EDX, FRn|0x01 );
                    JMP_TARGET(end);
                } else {
                    JMP_rel8(36, end);
                    JMP_TARGET(doublesize);
                    MEM_READ_DOUBLE( R_EDX, R_EAX, R_EDX );
                    load_fr_bank( R_ECX );
                    store_fr( R_ECX, R_EAX, FRn&0x0E );
                    store_fr( R_ECX, R_EDX, FRn|0x01 );
                    JMP_TARGET(end);
                }
                }
                break;
            case 0x7:
                { /* FMOV FRm, @(R0, Rn) */
                /* Store form of the above: write FRm (or the FRm pair) to
                 * R0+Rn, with write-alignment check. */
                uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
                check_fpuen();
                load_reg( R_EDX, Rn );
                ADD_sh4r_r32( REG_OFFSET(r[0]), R_EDX );
                check_walign32( R_EDX );
                load_spreg( R_ECX, R_FPSCR );
                TEST_imm32_r32( FPSCR_SZ, R_ECX );
                JNE_rel8(20, doublesize);
                load_fr_bank( R_ECX );
                load_fr( R_ECX, R_EAX, FRm );
                MEM_WRITE_LONG( R_EDX, R_EAX ); // 12
                if( FRm&1 ) {
                    JMP_rel8( 48, end );
                    JMP_TARGET(doublesize);
                    load_xf_bank( R_ECX );
                    load_fr( R_ECX, R_EAX, FRm&0x0E );
                    /* ECX (bank pointer) is consumed by the final load. */
                    load_fr( R_ECX, R_ECX, FRm|0x01 );
                    MEM_WRITE_DOUBLE( R_EDX, R_EAX, R_ECX );
                    JMP_TARGET(end);
                } else {
                    JMP_rel8( 39, end );
                    JMP_TARGET(doublesize);
                    load_fr_bank( R_ECX );
                    load_fr( R_ECX, R_EAX, FRm&0x0E );
                    load_fr( R_ECX, R_ECX, FRm|0x01 );
                    MEM_WRITE_DOUBLE( R_EDX, R_EAX, R_ECX );
                    JMP_TARGET(end);
                }
                }
                break;
            case 0x8:
                { /* FMOV @Rm, FRn */
                /* Same as FMOV @(R0,Rm),FRn but without the R0 offset. */
                uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
                check_fpuen();
                load_reg( R_EDX, Rm );
                check_ralign32( R_EDX );
                load_spreg( R_ECX, R_FPSCR );
                TEST_imm32_r32( FPSCR_SZ, R_ECX );
                JNE_rel8(19, doublesize);
                MEM_READ_LONG( R_EDX, R_EAX );
                load_fr_bank( R_ECX );
                store_fr( R_ECX, R_EAX, FRn );
                if( FRn&1 ) {
                    JMP_rel8(48, end);
                    JMP_TARGET(doublesize);
                    MEM_READ_DOUBLE( R_EDX, R_EAX, R_EDX );
                    load_spreg( R_ECX, R_FPSCR ); // assume read_long clobbered it
                    load_xf_bank( R_ECX );
                    store_fr( R_ECX, R_EAX, FRn&0x0E );
                    store_fr( R_ECX, R_EDX, FRn|0x01 );
                    JMP_TARGET(end);
                } else {
                    JMP_rel8(36, end);
                    JMP_TARGET(doublesize);
                    MEM_READ_DOUBLE( R_EDX, R_EAX, R_EDX );
                    load_fr_bank( R_ECX );
                    store_fr( R_ECX, R_EAX, FRn&0x0E );
                    store_fr( R_ECX, R_EDX, FRn|0x01 );
                    JMP_TARGET(end);
                }
                }
                break;
2776 case 0x9:
2777 { /* FMOV @Rm+, FRn */
2778 uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2779 check_fpuen();
2780 load_reg( R_EDX, Rm );
2781 check_ralign32( R_EDX );
2782 MOV_r32_r32( R_EDX, R_EAX );
2783 load_spreg( R_ECX, R_FPSCR );
2784 TEST_imm32_r32( FPSCR_SZ, R_ECX );
2785 JNE_rel8(25, doublesize);
2786 ADD_imm8s_r32( 4, R_EAX );
2787 store_reg( R_EAX, Rm );
2788 MEM_READ_LONG( R_EDX, R_EAX );
2789 load_fr_bank( R_ECX );
2790 store_fr( R_ECX, R_EAX, FRn );
2791 if( FRn&1 ) {
2792 JMP_rel8(54, end);
2793 JMP_TARGET(doublesize);
2794 ADD_imm8s_r32( 8, R_EAX );
2795 store_reg(R_EAX, Rm);
2796 MEM_READ_DOUBLE( R_EDX, R_EAX, R_EDX );
2797 load_spreg( R_ECX, R_FPSCR ); // assume read_long clobbered it
2798 load_xf_bank( R_ECX );
2799 store_fr( R_ECX, R_EAX, FRn&0x0E );
2800 store_fr( R_ECX, R_EDX, FRn|0x01 );
2801 JMP_TARGET(end);
2802 } else {
2803 JMP_rel8(42, end);
2804 ADD_imm8s_r32( 8, R_EAX );
2805 store_reg(R_EAX, Rm);
2806 MEM_READ_DOUBLE( R_EDX, R_EAX, R_EDX );
2807 load_fr_bank( R_ECX );
2808 store_fr( R_ECX, R_EAX, FRn&0x0E );
2809 store_fr( R_ECX, R_EDX, FRn|0x01 );
2810 JMP_TARGET(end);
2811 }
2812 }
2813 break;
            case 0xA:
                { /* FMOV FRm, @Rn */
                /* Store FRm (or the FRm pair when SZ=1) to @Rn; bank choice
                 * per FRm parity as in the other FMOV forms. */
                uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
                check_fpuen();
                load_reg( R_EDX, Rn );
                check_walign32( R_EDX );
                load_spreg( R_ECX, R_FPSCR );
                TEST_imm32_r32( FPSCR_SZ, R_ECX );
                JNE_rel8(20, doublesize);
                load_fr_bank( R_ECX );
                load_fr( R_ECX, R_EAX, FRm );
                MEM_WRITE_LONG( R_EDX, R_EAX ); // 12
                if( FRm&1 ) {
                    JMP_rel8( 48, end );
                    JMP_TARGET(doublesize);
                    load_xf_bank( R_ECX );
                    load_fr( R_ECX, R_EAX, FRm&0x0E );
                    load_fr( R_ECX, R_ECX, FRm|0x01 );
                    MEM_WRITE_DOUBLE( R_EDX, R_EAX, R_ECX );
                    JMP_TARGET(end);
                } else {
                    JMP_rel8( 39, end );
                    JMP_TARGET(doublesize);
                    load_fr_bank( R_ECX );
                    load_fr( R_ECX, R_EAX, FRm&0x0E );
                    load_fr( R_ECX, R_ECX, FRm|0x01 );
                    MEM_WRITE_DOUBLE( R_EDX, R_EAX, R_ECX );
                    JMP_TARGET(end);
                }
                }
                break;
            case 0xB:
                { /* FMOV FRm, @-Rn */
                /* Pre-decrement store: Rn -= 4 (SZ=0) or 8 (SZ=1) is written
                 * back before the memory write. */
                uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
                check_fpuen();
                load_reg( R_EDX, Rn );
                check_walign32( R_EDX );
                load_spreg( R_ECX, R_FPSCR );
                TEST_imm32_r32( FPSCR_SZ, R_ECX );
                JNE_rel8(26, doublesize);
                load_fr_bank( R_ECX );
                load_fr( R_ECX, R_EAX, FRm );
                ADD_imm8s_r32(-4,R_EDX);
                store_reg( R_EDX, Rn );
                MEM_WRITE_LONG( R_EDX, R_EAX ); // 12
                if( FRm&1 ) {
                    JMP_rel8( 54, end );
                    JMP_TARGET(doublesize);
                    load_xf_bank( R_ECX );
                    load_fr( R_ECX, R_EAX, FRm&0x0E );
                    load_fr( R_ECX, R_ECX, FRm|0x01 );
                    ADD_imm8s_r32(-8,R_EDX);
                    store_reg( R_EDX, Rn );
                    MEM_WRITE_DOUBLE( R_EDX, R_EAX, R_ECX );
                    JMP_TARGET(end);
                } else {
                    JMP_rel8( 45, end );
                    JMP_TARGET(doublesize);
                    load_fr_bank( R_ECX );
                    load_fr( R_ECX, R_EAX, FRm&0x0E );
                    load_fr( R_ECX, R_ECX, FRm|0x01 );
                    ADD_imm8s_r32(-8,R_EDX);
                    store_reg( R_EDX, Rn );
                    MEM_WRITE_DOUBLE( R_EDX, R_EAX, R_ECX );
                    JMP_TARGET(end);
                }
                }
                break;
2882 case 0xC:
2883 { /* FMOV FRm, FRn */
2884 uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2885 /* As horrible as this looks, it's actually covering 5 separate cases:
2886 * 1. 32-bit fr-to-fr (PR=0)
2887 * 2. 64-bit dr-to-dr (PR=1, FRm&1 == 0, FRn&1 == 0 )
2888 * 3. 64-bit dr-to-xd (PR=1, FRm&1 == 0, FRn&1 == 1 )
2889 * 4. 64-bit xd-to-dr (PR=1, FRm&1 == 1, FRn&1 == 0 )
2890 * 5. 64-bit xd-to-xd (PR=1, FRm&1 == 1, FRn&1 == 1 )
2891 */
2892 check_fpuen();
2893 load_spreg( R_ECX, R_FPSCR );
2894 load_fr_bank( R_EDX );
2895 TEST_imm32_r32( FPSCR_SZ, R_ECX );
2896 JNE_rel8(8, doublesize);
2897 load_fr( R_EDX, R_EAX, FRm ); // PR=0 branch
2898 store_fr( R_EDX, R_EAX, FRn );
2899 if( FRm&1 ) {
2900 JMP_rel8(24, end);
2901 JMP_TARGET(doublesize);
2902 load_xf_bank( R_ECX );
2903 load_fr( R_ECX, R_EAX, FRm-1 );
2904 if( FRn&1 ) {
2905 load_fr( R_ECX, R_EDX, FRm );
2906 store_fr( R_ECX, R_EAX, FRn-1 );
2907 store_fr( R_ECX, R_EDX, FRn );
2908 } else /* FRn&1 == 0 */ {
2909 load_fr( R_ECX, R_ECX, FRm );
2910 store_fr( R_EDX, R_EAX, FRn );
2911 store_fr( R_EDX, R_ECX, FRn+1 );
2912 }
2913 JMP_TARGET(end);
2914 } else /* FRm&1 == 0 */ {
2915 if( FRn&1 ) {
2916 JMP_rel8(24, end);
2917 load_xf_bank( R_ECX );
2918 load_fr( R_EDX, R_EAX, FRm );
2919 load_fr( R_EDX, R_EDX, FRm+1 );
2920 store_fr( R_ECX, R_EAX, FRn-1 );
2921 store_fr( R_ECX, R_EDX, FRn );
2922 JMP_TARGET(end);
2923 } else /* FRn&1 == 0 */ {
2924 JMP_rel8(12, end);
2925 load_fr( R_EDX, R_EAX, FRm );
2926 load_fr( R_EDX, R_ECX, FRm+1 );
2927 store_fr( R_EDX, R_EAX, FRn );
2928 store_fr( R_EDX, R_ECX, FRn+1 );
2929 JMP_TARGET(end);
2930 }
2931 }
2932 }
2933 break;
2934 case 0xD:
2935 switch( (ir&0xF0) >> 4 ) {
                case 0x0:
                    { /* FSTS FPUL, FRn */
                    /* Raw 32-bit copy of FPUL into FRn (no conversion). */
                    uint32_t FRn = ((ir>>8)&0xF);
                    check_fpuen();
                    load_fr_bank( R_ECX );
                    load_spreg( R_EAX, R_FPUL );
                    store_fr( R_ECX, R_EAX, FRn );
                    }
                    break;
                case 0x1:
                    { /* FLDS FRm, FPUL */
                    /* Raw 32-bit copy of FRm into FPUL (no conversion). */
                    uint32_t FRm = ((ir>>8)&0xF);
                    check_fpuen();
                    load_fr_bank( R_ECX );
                    load_fr( R_ECX, R_EAX, FRm );
                    store_spreg( R_EAX, R_FPUL );
                    }
                    break;
                case 0x2:
                    { /* FLOAT FPUL, FRn */
                    /* Integer->FP conversion: FILD loads FPUL as an int onto
                     * the x87 stack, then the result pops as single or
                     * double depending on FPSCR.PR. */
                    uint32_t FRn = ((ir>>8)&0xF);
                    check_fpuen();
                    load_spreg( R_ECX, R_FPSCR );
                    load_spreg(R_EDX, REG_OFFSET(fr_bank));
                    FILD_sh4r(R_FPUL);
                    TEST_imm32_r32( FPSCR_PR, R_ECX );
                    JNE_rel8(5, doubleprec);
                    pop_fr( R_EDX, FRn );
                    JMP_rel8(3, end);
                    JMP_TARGET(doubleprec);
                    pop_dr( R_EDX, FRn );
                    JMP_TARGET(end);
                    }
                    break;
                case 0x3:
                    { /* FTRC FRm, FPUL */
                    /* FP->int truncation with SH4-style saturation: the
                     * value is compared against max_int and min_int; out of
                     * range results store the clamp value (loaded through
                     * ECX, which still points at the relevant limit) instead
                     * of the FISTP result.  Trailing // numbers are the
                     * emitted byte counts backing the rel8 offsets. */
                    uint32_t FRm = ((ir>>8)&0xF);
                    check_fpuen();
                    load_spreg( R_ECX, R_FPSCR );
                    load_fr_bank( R_EDX );
                    TEST_imm32_r32( FPSCR_PR, R_ECX );
                    JNE_rel8(5, doubleprec);
                    push_fr( R_EDX, FRm );
                    JMP_rel8(3, doop);
                    JMP_TARGET(doubleprec);
                    push_dr( R_EDX, FRm );
                    JMP_TARGET( doop );
                    load_imm32( R_ECX, (uint32_t)&max_int );
                    FILD_r32ind( R_ECX );
                    FCOMIP_st(1);
                    JNA_rel8( 16, sat );
                    load_imm32( R_ECX, (uint32_t)&min_int ); // 5
                    FILD_r32ind( R_ECX ); // 2
                    FCOMIP_st(1); // 2
                    JAE_rel8( 5, sat2 ); // 2
                    FISTP_sh4r(R_FPUL); // 3
                    JMP_rel8( 9, end ); // 2
                    JMP_TARGET(sat);
                    JMP_TARGET(sat2);
                    MOV_r32ind_r32( R_ECX, R_ECX ); // 2
                    store_spreg( R_ECX, R_FPUL );
                    FPOP_st();
                    JMP_TARGET(end);
                    }
                    break;
                case 0x4:
                    { /* FNEG FRn */
                    /* Unary x87 ops: push FRn, apply the op, pop back;
                     * single vs double chosen at runtime on FPSCR.PR. */
                    uint32_t FRn = ((ir>>8)&0xF);
                    check_fpuen();
                    load_spreg( R_ECX, R_FPSCR );
                    TEST_imm32_r32( FPSCR_PR, R_ECX );
                    load_fr_bank( R_EDX );
                    JNE_rel8(10, doubleprec);
                    push_fr(R_EDX, FRn);
                    FCHS_st0();
                    pop_fr(R_EDX, FRn);
                    JMP_rel8(8, end);
                    JMP_TARGET(doubleprec);
                    push_dr(R_EDX, FRn);
                    FCHS_st0();
                    pop_dr(R_EDX, FRn);
                    JMP_TARGET(end);
                    }
                    break;
                case 0x5:
                    { /* FABS FRn */
                    uint32_t FRn = ((ir>>8)&0xF);
                    check_fpuen();
                    load_spreg( R_ECX, R_FPSCR );
                    load_fr_bank( R_EDX );
                    TEST_imm32_r32( FPSCR_PR, R_ECX );
                    JNE_rel8(10, doubleprec);
                    push_fr(R_EDX, FRn); // 3
                    FABS_st0(); // 2
                    pop_fr( R_EDX, FRn); //3
                    JMP_rel8(8,end); // 2
                    JMP_TARGET(doubleprec);
                    push_dr(R_EDX, FRn);
                    FABS_st0();
                    pop_dr(R_EDX, FRn);
                    JMP_TARGET(end);
                    }
                    break;
                case 0x6:
                    { /* FSQRT FRn */
                    uint32_t FRn = ((ir>>8)&0xF);
                    check_fpuen();
                    load_spreg( R_ECX, R_FPSCR );
                    TEST_imm32_r32( FPSCR_PR, R_ECX );
                    load_fr_bank( R_EDX );
                    JNE_rel8(10, doubleprec);
                    push_fr(R_EDX, FRn);
                    FSQRT_st0();
                    pop_fr(R_EDX, FRn);
                    JMP_rel8(8, end);
                    JMP_TARGET(doubleprec);
                    push_dr(R_EDX, FRn);
                    FSQRT_st0();
                    pop_dr(R_EDX, FRn);
                    JMP_TARGET(end);
                    }
                    break;
                case 0x7:
                    { /* FSRRA FRn */
                    /* 1/sqrt(FRn), computed as FLD1 / sqrt(FRn).  Defined
                     * for single precision only: when PR=1 the whole body is
                     * skipped (no-op). */
                    uint32_t FRn = ((ir>>8)&0xF);
                    check_fpuen();
                    load_spreg( R_ECX, R_FPSCR );
                    TEST_imm32_r32( FPSCR_PR, R_ECX );
                    load_fr_bank( R_EDX );
                    JNE_rel8(12, end); // PR=0 only
                    FLD1_st0();
                    push_fr(R_EDX, FRn);
                    FSQRT_st0();
                    FDIVP_st(1);
                    pop_fr(R_EDX, FRn);
                    JMP_TARGET(end);
                    }
                    break;
                case 0x8:
                    { /* FLDI0 FRn */
                    /* IFF PR=0 */
                    /* Load literal 0.0f into FRn; no-op when PR=1. */
                    uint32_t FRn = ((ir>>8)&0xF);
                    check_fpuen();
                    load_spreg( R_ECX, R_FPSCR );
                    TEST_imm32_r32( FPSCR_PR, R_ECX );
                    JNE_rel8(8, end);
                    XOR_r32_r32( R_EAX, R_EAX );
                    load_spreg( R_ECX, REG_OFFSET(fr_bank) );
                    store_fr( R_ECX, R_EAX, FRn );
                    JMP_TARGET(end);
                    }
                    break;
                case 0x9:
                    { /* FLDI1 FRn */
                    /* IFF PR=0 */
                    /* Load literal 1.0f (IEEE754 0x3F800000) into FRn;
                     * no-op when PR=1. */
                    uint32_t FRn = ((ir>>8)&0xF);
                    check_fpuen();
                    load_spreg( R_ECX, R_FPSCR );
                    TEST_imm32_r32( FPSCR_PR, R_ECX );
                    JNE_rel8(11, end);
                    load_imm32(R_EAX, 0x3F800000);
                    load_spreg( R_ECX, REG_OFFSET(fr_bank) );
                    store_fr( R_ECX, R_EAX, FRn );
                    JMP_TARGET(end);
                    }
                    break;
                case 0xA:
                    { /* FCNVSD FPUL, FRn */
                    /* Single->double conversion via the x87 stack; only
                     * executes when PR=1 (JE skips it for PR=0). */
                    uint32_t FRn = ((ir>>8)&0xF);
                    check_fpuen();
                    load_spreg( R_ECX, R_FPSCR );
                    TEST_imm32_r32( FPSCR_PR, R_ECX );
                    JE_rel8(9, end); // only when PR=1
                    load_fr_bank( R_ECX );
                    push_fpul();
                    pop_dr( R_ECX, FRn );
                    JMP_TARGET(end);
                    }
                    break;
                case 0xB:
                    { /* FCNVDS FRm, FPUL */
                    /* Double->single conversion; only executes when PR=1. */
                    uint32_t FRm = ((ir>>8)&0xF);
                    check_fpuen();
                    load_spreg( R_ECX, R_FPSCR );
                    TEST_imm32_r32( FPSCR_PR, R_ECX );
                    JE_rel8(9, end); // only when PR=1
                    load_fr_bank( R_ECX );
                    push_dr( R_ECX, FRm );
                    pop_fpul();
                    JMP_TARGET(end);
                    }
                    break;
                case 0xE:
                    { /* FIPR FVm, FVn */
                    /* 4-element inner product of vectors FVm and FVn,
                     * accumulated on the x87 stack and stored to the last
                     * element of FVn ((FVn<<2)+3).  Single precision only:
                     * when PR=1 the body is skipped entirely (no-op;
                     * presumably FIPR is undefined in double precision). */
                    uint32_t FVn = ((ir>>10)&0x3); uint32_t FVm = ((ir>>8)&0x3);
                    check_fpuen();
                    load_spreg( R_ECX, R_FPSCR );
                    TEST_imm32_r32( FPSCR_PR, R_ECX );
                    JNE_rel8(44, doubleprec);

                    load_fr_bank( R_ECX );
                    push_fr( R_ECX, FVm<<2 );
                    push_fr( R_ECX, FVn<<2 );
                    FMULP_st(1);
                    push_fr( R_ECX, (FVm<<2)+1);
                    push_fr( R_ECX, (FVn<<2)+1);
                    FMULP_st(1);
                    FADDP_st(1);
                    push_fr( R_ECX, (FVm<<2)+2);
                    push_fr( R_ECX, (FVn<<2)+2);
                    FMULP_st(1);
                    FADDP_st(1);
                    push_fr( R_ECX, (FVm<<2)+3);
                    push_fr( R_ECX, (FVn<<2)+3);
                    FMULP_st(1);
                    FADDP_st(1);
                    pop_fr( R_ECX, (FVn<<2)+3);
                    JMP_TARGET(doubleprec);
                    }
                    break;
                case 0xF:
                    switch( (ir&0x100) >> 8 ) {
                        case 0x0:
                            { /* FSCA FPUL, FRn */
                            /* sin/cos of the FPUL angle, written to the
                             * even-aligned FR pair by the sh4_fsca helper;
                             * skipped (no-op) when PR=1. */
                            uint32_t FRn = ((ir>>9)&0x7)<<1;
                            check_fpuen();
                            load_spreg( R_ECX, R_FPSCR );
                            TEST_imm32_r32( FPSCR_PR, R_ECX );
                            JNE_rel8( 21, doubleprec );
                            load_fr_bank( R_ECX );
                            ADD_imm8s_r32( (FRn&0x0E)<<2, R_ECX );
                            load_spreg( R_EDX, R_FPUL );
                            call_func2( sh4_fsca, R_EDX, R_ECX );
                            JMP_TARGET(doubleprec);
                            }
                            break;
                        case 0x1:
                            switch( (ir&0x200) >> 9 ) {
                                case 0x0:
                                    { /* FTRV XMTRX, FVn */
                                    /* Matrix-vector transform via the
                                     * sh4_ftrv helper (XMTRX lives in the
                                     * back bank); skipped when PR=1. */
                                    uint32_t FVn = ((ir>>10)&0x3);
                                    check_fpuen();
                                    load_spreg( R_ECX, R_FPSCR );
                                    TEST_imm32_r32( FPSCR_PR, R_ECX );
                                    JNE_rel8( 30, doubleprec );
                                    load_fr_bank( R_EDX ); // 3
                                    ADD_imm8s_r32( FVn<<4, R_EDX ); // 3
                                    load_xf_bank( R_ECX ); // 12
                                    call_func2( sh4_ftrv, R_EDX, R_ECX ); // 12
                                    JMP_TARGET(doubleprec);
                                    }
                                    break;
                                case 0x1:
                                    switch( (ir&0xC00) >> 10 ) {
                                        case 0x0:
                                            { /* FSCHG */
                                            /* Toggle FPSCR.SZ (32/64-bit
                                             * FMOV transfer size). */
                                            check_fpuen();
                                            load_spreg( R_ECX, R_FPSCR );
                                            XOR_imm32_r32( FPSCR_SZ, R_ECX );
                                            store_spreg( R_ECX, R_FPSCR );
                                            }
                                            break;
                                        case 0x2:
                                            { /* FRCHG */
                                            /* Toggle FPSCR.FR (swap FP
                                             * register banks) and refresh the
                                             * cached fr_bank pointer. */
                                            check_fpuen();
                                            load_spreg( R_ECX, R_FPSCR );
                                            XOR_imm32_r32( FPSCR_FR, R_ECX );
                                            store_spreg( R_ECX, R_FPSCR );
                                            update_fr_bank( R_ECX );
                                            }
                                            break;
                                        case 0x3:
                                            { /* UNDEF */
                                            /* 0xFFFD: undefined opcode —
                                             * slot-illegal in a delay slot,
                                             * otherwise exit the block with
                                             * an illegal-instruction code. */
                                            if( sh4_x86.in_delay_slot ) {
                                                SLOTILLEGAL();
                                            } else {
                                                JMP_exit(EXIT_ILLEGAL);
                                                return 1;
                                            }
                                            }
                                            break;
                                        default:
                                            UNDEF();
                                            break;
                                    }
                                    break;
                            }
                            break;
                    }
                    break;
3227 default:
3228 UNDEF();
3229 break;
3230 }
3231 break;
        case 0xE:
            { /* FMAC FR0, FRm, FRn */
            /* FRn += FR0 * FRm, on the x87 stack; single or double path
             * chosen at runtime on FPSCR.PR.  (Note: the PR=1 form uses
             * the same register numbers as double-precision operands.) */
            uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
            check_fpuen();
            load_spreg( R_ECX, R_FPSCR );
            load_spreg( R_EDX, REG_OFFSET(fr_bank));
            TEST_imm32_r32( FPSCR_PR, R_ECX );
            JNE_rel8(18, doubleprec);
            push_fr( R_EDX, 0 );
            push_fr( R_EDX, FRm );
            FMULP_st(1);
            push_fr( R_EDX, FRn );
            FADDP_st(1);
            pop_fr( R_EDX, FRn );
            JMP_rel8(16, end);
            JMP_TARGET(doubleprec);
            push_dr( R_EDX, 0 );
            push_dr( R_EDX, FRm );
            FMULP_st(1);
            push_dr( R_EDX, FRn );
            FADDP_st(1);
            pop_dr( R_EDX, FRn );
            JMP_TARGET(end);
            }
            break;
3257 default:
3258 UNDEF();
3259 break;
3260 }
3261 break;
3262 }
3264 if( sh4_x86.in_delay_slot ) {
3265 ADD_imm8s_r32(2,R_ESI);
3266 sh4_x86.in_delay_slot = FALSE;
3267 return 1;
3268 } else {
3269 INC_r32(R_ESI);
3270 }
3271 return 0;
3272 }
.