filename | src/sh4/sh4x86.c |
changeset | 401:f79327f39818 |
prev | 397:640324505325 |
next | 408:af496b734734 |
author | nkeynes |
date | Fri Sep 28 07:25:22 2007 +0000 (16 years ago) |
permissions | -rw-r--r-- |
last change | Remove MMU check (probably shouldn't be here anyway), and disable TRACE_IO checks by default |
view | annotate | diff | log | raw |
1 /**
2 * $Id: sh4x86.c,v 1.14 2007-09-20 08:37:19 nkeynes Exp $
3 *
4 * SH4 => x86 translation. This version does no real optimization, it just
5 * outputs straight-line x86 code - it mainly exists to provide a baseline
6 * to test the optimizing versions against.
7 *
8 * Copyright (c) 2007 Nathan Keynes.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */
21 #include <assert.h>
22 #include <math.h>
24 #ifndef NDEBUG
25 #define DEBUG_JUMPS 1
26 #endif
28 #include "sh4/sh4core.h"
29 #include "sh4/sh4trans.h"
30 #include "sh4/sh4mmio.h"
31 #include "sh4/x86op.h"
32 #include "clock.h"
34 #define DEFAULT_BACKPATCH_SIZE 4096
36 /**
37 * Struct to manage internal translation state. This state is not saved -
38 * it is only valid between calls to sh4_translate_begin_block() and
39 * sh4_translate_end_block()
40 */
struct sh4_x86_state {
    gboolean in_delay_slot;   /* true while translating the instruction in a branch delay slot */
    gboolean priv_checked; /* true if we've already checked the cpu mode. */
    gboolean fpuen_checked; /* true if we've already checked fpu enabled. */
    int exit_code;            /* value the generated block returns (loaded into EAX by exit_block) */

    /* Allocated memory for the (block-wide) back-patch list */
    uint32_t **backpatch_list;
    uint32_t backpatch_posn;  /* number of entries currently recorded */
    uint32_t backpatch_size;  /* allocated capacity, in entries */
};
/* Block exit codes. Each is the byte offset of the corresponding entry in the
 * exception jump table emitted by sh4_translate_end_block - entries are 7
 * bytes apart (PUSH imm32 = 5 bytes, JMP rel8 = 2 bytes). */
#define EXIT_DATA_ADDR_READ 0
#define EXIT_DATA_ADDR_WRITE 7
#define EXIT_ILLEGAL 14
#define EXIT_SLOT_ILLEGAL 21
#define EXIT_FPU_DISABLED 28
#define EXIT_SLOT_FPU_DISABLED 35
60 static struct sh4_x86_state sh4_x86;
62 static uint32_t max_int = 0x7FFFFFFF;
63 static uint32_t min_int = 0x80000000;
64 static uint32_t save_fcw; /* save value for fpu control word */
65 static uint32_t trunc_fcw = 0x0F7F; /* fcw value for truncation mode */
67 void sh4_x86_init()
68 {
69 sh4_x86.backpatch_list = malloc(DEFAULT_BACKPATCH_SIZE);
70 sh4_x86.backpatch_size = DEFAULT_BACKPATCH_SIZE / sizeof(uint32_t *);
71 }
74 static void sh4_x86_add_backpatch( uint8_t *ptr )
75 {
76 if( sh4_x86.backpatch_posn == sh4_x86.backpatch_size ) {
77 sh4_x86.backpatch_size <<= 1;
78 sh4_x86.backpatch_list = realloc( sh4_x86.backpatch_list, sh4_x86.backpatch_size * sizeof(uint32_t *) );
79 assert( sh4_x86.backpatch_list != NULL );
80 }
81 sh4_x86.backpatch_list[sh4_x86.backpatch_posn++] = (uint32_t *)ptr;
82 }
84 static void sh4_x86_do_backpatch( uint8_t *reloc_base )
85 {
86 unsigned int i;
87 for( i=0; i<sh4_x86.backpatch_posn; i++ ) {
88 *sh4_x86.backpatch_list[i] += (reloc_base - ((uint8_t *)sh4_x86.backpatch_list[i]) - 4);
89 }
90 }
92 /**
93 * Emit an instruction to load an SH4 reg into a real register
94 */
95 static inline void load_reg( int x86reg, int sh4reg )
96 {
97 /* mov [bp+n], reg */
98 OP(0x8B);
99 OP(0x45 + (x86reg<<3));
100 OP(REG_OFFSET(r[sh4reg]));
101 }
/* Load the low 16 bits of an SH4 reg into x86reg, sign-extended to 32 bits */
static inline void load_reg16s( int x86reg, int sh4reg )
{
    OP(0x0F);
    OP(0xBF);  /* MOVSX r32, r/m16 */
    MODRM_r32_sh4r(x86reg, REG_OFFSET(r[sh4reg]));
}
/* Load the low 16 bits of an SH4 reg into x86reg, zero-extended to 32 bits */
static inline void load_reg16u( int x86reg, int sh4reg )
{
    OP(0x0F);
    OP(0xB7);  /* MOVZX r32, r/m16 */
    MODRM_r32_sh4r(x86reg, REG_OFFSET(r[sh4reg]));

}
118 #define load_spreg( x86reg, regoff ) MOV_sh4r_r32( regoff, x86reg )
119 #define store_spreg( x86reg, regoff ) MOV_r32_sh4r( x86reg, regoff )
120 /**
121 * Emit an instruction to load an immediate value into a register
122 */
123 static inline void load_imm32( int x86reg, uint32_t value ) {
124 /* mov #value, reg */
125 OP(0xB8 + x86reg);
126 OP32(value);
127 }
129 /**
130 * Emit an instruction to store an SH4 reg (RN)
131 */
132 void static inline store_reg( int x86reg, int sh4reg ) {
133 /* mov reg, [bp+n] */
134 OP(0x89);
135 OP(0x45 + (x86reg<<3));
136 OP(REG_OFFSET(r[sh4reg]));
137 }
139 #define load_fr_bank(bankreg) load_spreg( bankreg, REG_OFFSET(fr_bank))
141 /**
142 * Load an FR register (single-precision floating point) into an integer x86
143 * register (eg for register-to-register moves)
144 */
145 void static inline load_fr( int bankreg, int x86reg, int frm )
146 {
147 OP(0x8B); OP(0x40+bankreg+(x86reg<<3)); OP((frm^1)<<2);
148 }
150 /**
151 * Store an FR register (single-precision floating point) into an integer x86
152 * register (eg for register-to-register moves)
153 */
154 void static inline store_fr( int bankreg, int x86reg, int frn )
155 {
156 OP(0x89); OP(0x40+bankreg+(x86reg<<3)); OP((frn^1)<<2);
157 }
160 /**
161 * Load a pointer to the back fp back into the specified x86 register. The
162 * bankreg must have been previously loaded with FPSCR.
163 * NB: 12 bytes
164 */
165 static inline void load_xf_bank( int bankreg )
166 {
167 NOT_r32( bankreg );
168 SHR_imm8_r32( (21 - 6), bankreg ); // Extract bit 21 then *64 for bank size
169 AND_imm8s_r32( 0x40, bankreg ); // Complete extraction
170 OP(0x8D); OP(0x44+(bankreg<<3)); OP(0x28+bankreg); OP(REG_OFFSET(fr)); // LEA [ebp+bankreg+disp], bankreg
171 }
173 /**
174 * Update the fr_bank pointer based on the current fpscr value.
175 */
176 static inline void update_fr_bank( int fpscrreg )
177 {
178 SHR_imm8_r32( (21 - 6), fpscrreg ); // Extract bit 21 then *64 for bank size
179 AND_imm8s_r32( 0x40, fpscrreg ); // Complete extraction
180 OP(0x8D); OP(0x44+(fpscrreg<<3)); OP(0x28+fpscrreg); OP(REG_OFFSET(fr)); // LEA [ebp+fpscrreg+disp], fpscrreg
181 store_spreg( fpscrreg, REG_OFFSET(fr_bank) );
182 }
183 /**
184 * Push FPUL (as a 32-bit float) onto the FPU stack
185 */
186 static inline void push_fpul( )
187 {
188 OP(0xD9); OP(0x45); OP(R_FPUL);
189 }
191 /**
192 * Pop FPUL (as a 32-bit float) from the FPU stack
193 */
194 static inline void pop_fpul( )
195 {
196 OP(0xD9); OP(0x5D); OP(R_FPUL);
197 }
199 /**
200 * Push a 32-bit float onto the FPU stack, with bankreg previously loaded
201 * with the location of the current fp bank.
202 */
203 static inline void push_fr( int bankreg, int frm )
204 {
205 OP(0xD9); OP(0x40 + bankreg); OP((frm^1)<<2); // FLD.S [bankreg + frm^1*4]
206 }
208 /**
209 * Pop a 32-bit float from the FPU stack and store it back into the fp bank,
210 * with bankreg previously loaded with the location of the current fp bank.
211 */
212 static inline void pop_fr( int bankreg, int frm )
213 {
214 OP(0xD9); OP(0x58 + bankreg); OP((frm^1)<<2); // FST.S [bankreg + frm^1*4]
215 }
217 /**
218 * Push a 64-bit double onto the FPU stack, with bankreg previously loaded
219 * with the location of the current fp bank.
220 */
221 static inline void push_dr( int bankreg, int frm )
222 {
223 OP(0xDD); OP(0x40 + bankreg); OP(frm<<2); // FLD.D [bankreg + frm*4]
224 }
/* Pop a 64-bit double from the FPU stack into the fp bank (cf push_dr) */
static inline void pop_dr( int bankreg, int frm )
{
    OP(0xDD); OP(0x58 + bankreg); OP(frm<<2); // FSTP.D [bankreg + frm*4] (pops the x87 stack)
}
231 /**
232 * Note: clobbers EAX to make the indirect call - this isn't usually
233 * a problem since the callee will usually clobber it anyway.
234 */
235 static inline void call_func0( void *ptr )
236 {
237 load_imm32(R_EAX, (uint32_t)ptr);
238 CALL_r32(R_EAX);
239 }
/* Emit a cdecl call with one stack argument; caller cleans up the stack */
static inline void call_func1( void *ptr, int arg1 )
{
    PUSH_r32(arg1);
    call_func0(ptr);
    ADD_imm8s_r32( 4, R_ESP );  /* pop the argument */
}
/* Emit a cdecl call with two stack arguments (pushed right-to-left);
 * caller cleans up the stack */
static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    PUSH_r32(arg2);
    PUSH_r32(arg1);
    call_func0(ptr);
    ADD_imm8s_r32( 8, R_ESP );  /* pop both arguments */
}
256 /**
257 * Write a double (64-bit) value into memory, with the first word in arg2a, and
258 * the second in arg2b
259 * NB: 30 bytes
260 */
261 static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
262 {
263 ADD_imm8s_r32( 4, addr );
264 PUSH_r32(arg2b);
265 PUSH_r32(addr);
266 ADD_imm8s_r32( -4, addr );
267 PUSH_r32(arg2a);
268 PUSH_r32(addr);
269 call_func0(sh4_write_long);
270 ADD_imm8s_r32( 8, R_ESP );
271 call_func0(sh4_write_long);
272 ADD_imm8s_r32( 8, R_ESP );
273 }
275 /**
276 * Read a double (64-bit) value from memory, writing the first word into arg2a
277 * and the second into arg2b. The addr must not be in EAX
278 * NB: 27 bytes
279 */
280 static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
281 {
282 PUSH_r32(addr);
283 call_func0(sh4_read_long);
284 POP_r32(addr);
285 PUSH_r32(R_EAX);
286 ADD_imm8s_r32( 4, addr );
287 PUSH_r32(addr);
288 call_func0(sh4_read_long);
289 ADD_imm8s_r32( 4, R_ESP );
290 MOV_r32_r32( R_EAX, arg2b );
291 POP_r32(arg2a);
292 }
/* Exception checks - Note that all exception checks will clobber EAX */

/* Emit a privilege check: if SR.MD is clear (user mode), exit the block with
 * an illegal (or slot-illegal) exception. Emitted at most once per block. */
static void check_priv( )
{
    if( !sh4_x86.priv_checked ) {
	sh4_x86.priv_checked = TRUE;  /* suppress duplicate checks in this block */
	load_spreg( R_EAX, R_SR );
	AND_imm32_r32( SR_MD, R_EAX );
	if( sh4_x86.in_delay_slot ) {
	    JE_exit( EXIT_SLOT_ILLEGAL );
	} else {
	    JE_exit( EXIT_ILLEGAL );
	}
    }
}
/* Emit an FPU-enabled check: if SR.FD is set (FPU disabled), exit the block
 * with an fpu-disabled (or slot-fpu-disabled) exception. Emitted at most
 * once per block. */
static void check_fpuen( )
{
    if( !sh4_x86.fpuen_checked ) {
	sh4_x86.fpuen_checked = TRUE;  /* suppress duplicate checks in this block */
	load_spreg( R_EAX, R_SR );
	AND_imm32_r32( SR_FD, R_EAX );
	if( sh4_x86.in_delay_slot ) {
	    JNE_exit(EXIT_SLOT_FPU_DISABLED);
	} else {
	    JNE_exit(EXIT_FPU_DISABLED);
	}
    }
}
/* Exit with a data-address-read error if the address in x86reg is not
 * 2-byte aligned */
static void check_ralign16( int x86reg )
{
    TEST_imm32_r32( 0x00000001, x86reg );
    JNE_exit(EXIT_DATA_ADDR_READ);
}
/* Exit with a data-address-write error if the address in x86reg is not
 * 2-byte aligned */
static void check_walign16( int x86reg )
{
    TEST_imm32_r32( 0x00000001, x86reg );
    JNE_exit(EXIT_DATA_ADDR_WRITE);
}
/* Exit with a data-address-read error if the address in x86reg is not
 * 4-byte aligned */
static void check_ralign32( int x86reg )
{
    TEST_imm32_r32( 0x00000003, x86reg );
    JNE_exit(EXIT_DATA_ADDR_READ);
}
/* Exit with a data-address-write error if the address in x86reg is not
 * 4-byte aligned */
static void check_walign32( int x86reg )
{
    TEST_imm32_r32( 0x00000003, x86reg );
    JNE_exit(EXIT_DATA_ADDR_WRITE);
}
/* Undefined/illegal instruction - currently emits nothing */
#define UNDEF()
/* Move the value a MEM_READ_* helper returned in EAX into value_reg */
#define MEM_RESULT(value_reg) if(value_reg != R_EAX) { MOV_r32_r32(R_EAX,value_reg); }
/* Memory access helpers - all clobber EAX (call scratch / return value) */
#define MEM_READ_BYTE( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); MEM_RESULT(value_reg)
#define MEM_READ_WORD( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); MEM_RESULT(value_reg)
#define MEM_READ_LONG( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); MEM_RESULT(value_reg)
#define MEM_WRITE_BYTE( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg)
#define MEM_WRITE_WORD( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg)
#define MEM_WRITE_LONG( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg)

/* Illegal instruction in a delay slot: jump out with the slot-illegal exit
 * code and terminate translation of the basic block (returns 1) */
#define SLOTILLEGAL() JMP_exit(EXIT_SLOT_ILLEGAL); sh4_x86.in_delay_slot = FALSE; return 1;
359 /**
360 * Emit the 'start of block' assembly. Sets up the stack frame and save
361 * SI/DI as required
362 */
363 void sh4_translate_begin_block()
364 {
365 PUSH_r32(R_EBP);
366 /* mov &sh4r, ebp */
367 load_imm32( R_EBP, (uint32_t)&sh4r );
368 PUSH_r32(R_EDI);
369 PUSH_r32(R_ESI);
370 XOR_r32_r32(R_ESI, R_ESI);
372 sh4_x86.in_delay_slot = FALSE;
373 sh4_x86.priv_checked = FALSE;
374 sh4_x86.fpuen_checked = FALSE;
375 sh4_x86.backpatch_posn = 0;
376 sh4_x86.exit_code = 1;
377 }
379 /**
380 * Exit the block early (ie branch out), conditionally or otherwise
381 */
382 void exit_block( )
383 {
384 store_spreg( R_EDI, REG_OFFSET(pc) );
385 MOV_moff32_EAX( (uint32_t)&sh4_cpu_period );
386 load_spreg( R_ECX, REG_OFFSET(slice_cycle) );
387 MUL_r32( R_ESI );
388 ADD_r32_r32( R_EAX, R_ECX );
389 store_spreg( R_ECX, REG_OFFSET(slice_cycle) );
390 load_imm32( R_EAX, sh4_x86.exit_code );
391 POP_r32(R_ESI);
392 POP_r32(R_EDI);
393 POP_r32(R_EBP);
394 RET();
395 }
397 /**
398 * Flush any open regs back to memory, restore SI/DI/, update PC, etc
399 */
400 void sh4_translate_end_block( sh4addr_t pc ) {
401 assert( !sh4_x86.in_delay_slot ); // should never stop here
402 // Normal termination - save PC, cycle count
403 exit_block( );
405 if( sh4_x86.backpatch_posn != 0 ) {
406 uint8_t *end_ptr = xlat_output;
407 // Exception termination. Jump block for various exception codes:
408 PUSH_imm32( EXC_DATA_ADDR_READ );
409 JMP_rel8( 33, target1 );
410 PUSH_imm32( EXC_DATA_ADDR_WRITE );
411 JMP_rel8( 26, target2 );
412 PUSH_imm32( EXC_ILLEGAL );
413 JMP_rel8( 19, target3 );
414 PUSH_imm32( EXC_SLOT_ILLEGAL );
415 JMP_rel8( 12, target4 );
416 PUSH_imm32( EXC_FPU_DISABLED );
417 JMP_rel8( 5, target5 );
418 PUSH_imm32( EXC_SLOT_FPU_DISABLED );
419 // target
420 JMP_TARGET(target1);
421 JMP_TARGET(target2);
422 JMP_TARGET(target3);
423 JMP_TARGET(target4);
424 JMP_TARGET(target5);
425 load_spreg( R_ECX, REG_OFFSET(pc) );
426 ADD_r32_r32( R_ESI, R_ECX );
427 ADD_r32_r32( R_ESI, R_ECX );
428 store_spreg( R_ECX, REG_OFFSET(pc) );
429 MOV_moff32_EAX( (uint32_t)&sh4_cpu_period );
430 load_spreg( R_ECX, REG_OFFSET(slice_cycle) );
431 MUL_r32( R_ESI );
432 ADD_r32_r32( R_EAX, R_ECX );
433 store_spreg( R_ECX, REG_OFFSET(slice_cycle) );
435 load_imm32( R_EAX, (uint32_t)sh4_raise_exception ); // 6
436 CALL_r32( R_EAX ); // 2
437 ADD_imm8s_r32( 4, R_ESP );
438 POP_r32(R_ESI);
439 POP_r32(R_EDI);
440 POP_r32(R_EBP);
441 RET();
443 sh4_x86_do_backpatch( end_ptr );
444 }
446 }
449 extern uint16_t *sh4_icache;
450 extern uint32_t sh4_icache_addr;
452 /**
453 * Translate a single instruction. Delayed branches are handled specially
454 * by translating both branch and delayed instruction as a single unit (as
455 *
456 *
457 * @return true if the instruction marks the end of a basic block
458 * (eg a branch or
459 */
460 uint32_t sh4_x86_translate_instruction( uint32_t pc )
461 {
462 uint32_t ir;
463 /* Read instruction */
464 uint32_t pageaddr = pc >> 12;
465 if( sh4_icache != NULL && pageaddr == sh4_icache_addr ) {
466 ir = sh4_icache[(pc&0xFFF)>>1];
467 } else {
468 sh4_icache = (uint16_t *)mem_get_page(pc);
469 if( ((uint32_t)sh4_icache) < MAX_IO_REGIONS ) {
470 /* If someone's actually been so daft as to try to execute out of an IO
471 * region, fallback on the full-blown memory read
472 */
473 sh4_icache = NULL;
474 ir = sh4_read_word(pc);
475 } else {
476 sh4_icache_addr = pageaddr;
477 ir = sh4_icache[(pc&0xFFF)>>1];
478 }
479 }
481 switch( (ir&0xF000) >> 12 ) {
482 case 0x0:
483 switch( ir&0xF ) {
484 case 0x2:
485 switch( (ir&0x80) >> 7 ) {
486 case 0x0:
487 switch( (ir&0x70) >> 4 ) {
488 case 0x0:
489 { /* STC SR, Rn */
490 uint32_t Rn = ((ir>>8)&0xF);
491 check_priv();
492 call_func0(sh4_read_sr);
493 store_reg( R_EAX, Rn );
494 }
495 break;
496 case 0x1:
497 { /* STC GBR, Rn */
498 uint32_t Rn = ((ir>>8)&0xF);
499 load_spreg( R_EAX, R_GBR );
500 store_reg( R_EAX, Rn );
501 }
502 break;
503 case 0x2:
504 { /* STC VBR, Rn */
505 uint32_t Rn = ((ir>>8)&0xF);
506 check_priv();
507 load_spreg( R_EAX, R_VBR );
508 store_reg( R_EAX, Rn );
509 }
510 break;
511 case 0x3:
512 { /* STC SSR, Rn */
513 uint32_t Rn = ((ir>>8)&0xF);
514 check_priv();
515 load_spreg( R_EAX, R_SSR );
516 store_reg( R_EAX, Rn );
517 }
518 break;
519 case 0x4:
520 { /* STC SPC, Rn */
521 uint32_t Rn = ((ir>>8)&0xF);
522 check_priv();
523 load_spreg( R_EAX, R_SPC );
524 store_reg( R_EAX, Rn );
525 }
526 break;
527 default:
528 UNDEF();
529 break;
530 }
531 break;
532 case 0x1:
533 { /* STC Rm_BANK, Rn */
534 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm_BANK = ((ir>>4)&0x7);
535 check_priv();
536 load_spreg( R_EAX, REG_OFFSET(r_bank[Rm_BANK]) );
537 store_reg( R_EAX, Rn );
538 }
539 break;
540 }
541 break;
542 case 0x3:
543 switch( (ir&0xF0) >> 4 ) {
544 case 0x0:
545 { /* BSRF Rn */
546 uint32_t Rn = ((ir>>8)&0xF);
547 if( sh4_x86.in_delay_slot ) {
548 SLOTILLEGAL();
549 } else {
550 load_imm32( R_EAX, pc + 4 );
551 store_spreg( R_EAX, R_PR );
552 load_reg( R_EDI, Rn );
553 ADD_r32_r32( R_EAX, R_EDI );
554 sh4_x86.in_delay_slot = TRUE;
555 return 0;
556 }
557 }
558 break;
559 case 0x2:
560 { /* BRAF Rn */
561 uint32_t Rn = ((ir>>8)&0xF);
562 if( sh4_x86.in_delay_slot ) {
563 SLOTILLEGAL();
564 } else {
565 load_reg( R_EDI, Rn );
566 ADD_imm32_r32( pc + 4, R_EDI );
567 sh4_x86.in_delay_slot = TRUE;
568 return 0;
569 }
570 }
571 break;
572 case 0x8:
573 { /* PREF @Rn */
574 uint32_t Rn = ((ir>>8)&0xF);
575 load_reg( R_EAX, Rn );
576 PUSH_r32( R_EAX );
577 AND_imm32_r32( 0xFC000000, R_EAX );
578 CMP_imm32_r32( 0xE0000000, R_EAX );
579 JNE_rel8(7, end);
580 call_func0( sh4_flush_store_queue );
581 JMP_TARGET(end);
582 ADD_imm8s_r32( 4, R_ESP );
583 }
584 break;
585 case 0x9:
586 { /* OCBI @Rn */
587 uint32_t Rn = ((ir>>8)&0xF);
588 }
589 break;
590 case 0xA:
591 { /* OCBP @Rn */
592 uint32_t Rn = ((ir>>8)&0xF);
593 }
594 break;
595 case 0xB:
596 { /* OCBWB @Rn */
597 uint32_t Rn = ((ir>>8)&0xF);
598 }
599 break;
600 case 0xC:
601 { /* MOVCA.L R0, @Rn */
602 uint32_t Rn = ((ir>>8)&0xF);
603 load_reg( R_EAX, 0 );
604 load_reg( R_ECX, Rn );
605 check_walign32( R_ECX );
606 MEM_WRITE_LONG( R_ECX, R_EAX );
607 }
608 break;
609 default:
610 UNDEF();
611 break;
612 }
613 break;
614 case 0x4:
615 { /* MOV.B Rm, @(R0, Rn) */
616 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
617 load_reg( R_EAX, 0 );
618 load_reg( R_ECX, Rn );
619 ADD_r32_r32( R_EAX, R_ECX );
620 load_reg( R_EAX, Rm );
621 MEM_WRITE_BYTE( R_ECX, R_EAX );
622 }
623 break;
624 case 0x5:
625 { /* MOV.W Rm, @(R0, Rn) */
626 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
627 load_reg( R_EAX, 0 );
628 load_reg( R_ECX, Rn );
629 ADD_r32_r32( R_EAX, R_ECX );
630 check_walign16( R_ECX );
631 load_reg( R_EAX, Rm );
632 MEM_WRITE_WORD( R_ECX, R_EAX );
633 }
634 break;
635 case 0x6:
636 { /* MOV.L Rm, @(R0, Rn) */
637 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
638 load_reg( R_EAX, 0 );
639 load_reg( R_ECX, Rn );
640 ADD_r32_r32( R_EAX, R_ECX );
641 check_walign32( R_ECX );
642 load_reg( R_EAX, Rm );
643 MEM_WRITE_LONG( R_ECX, R_EAX );
644 }
645 break;
646 case 0x7:
647 { /* MUL.L Rm, Rn */
648 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
649 load_reg( R_EAX, Rm );
650 load_reg( R_ECX, Rn );
651 MUL_r32( R_ECX );
652 store_spreg( R_EAX, R_MACL );
653 }
654 break;
655 case 0x8:
656 switch( (ir&0xFF0) >> 4 ) {
657 case 0x0:
658 { /* CLRT */
659 CLC();
660 SETC_t();
661 }
662 break;
663 case 0x1:
664 { /* SETT */
665 STC();
666 SETC_t();
667 }
668 break;
669 case 0x2:
670 { /* CLRMAC */
671 XOR_r32_r32(R_EAX, R_EAX);
672 store_spreg( R_EAX, R_MACL );
673 store_spreg( R_EAX, R_MACH );
674 }
675 break;
676 case 0x3:
677 { /* LDTLB */
678 }
679 break;
680 case 0x4:
681 { /* CLRS */
682 CLC();
683 SETC_sh4r(R_S);
684 }
685 break;
686 case 0x5:
687 { /* SETS */
688 STC();
689 SETC_sh4r(R_S);
690 }
691 break;
692 default:
693 UNDEF();
694 break;
695 }
696 break;
697 case 0x9:
698 switch( (ir&0xF0) >> 4 ) {
699 case 0x0:
700 { /* NOP */
701 /* Do nothing. Well, we could emit an 0x90, but what would really be the point? */
702 }
703 break;
704 case 0x1:
705 { /* DIV0U */
706 XOR_r32_r32( R_EAX, R_EAX );
707 store_spreg( R_EAX, R_Q );
708 store_spreg( R_EAX, R_M );
709 store_spreg( R_EAX, R_T );
710 }
711 break;
712 case 0x2:
713 { /* MOVT Rn */
714 uint32_t Rn = ((ir>>8)&0xF);
715 load_spreg( R_EAX, R_T );
716 store_reg( R_EAX, Rn );
717 }
718 break;
719 default:
720 UNDEF();
721 break;
722 }
723 break;
724 case 0xA:
725 switch( (ir&0xF0) >> 4 ) {
726 case 0x0:
727 { /* STS MACH, Rn */
728 uint32_t Rn = ((ir>>8)&0xF);
729 load_spreg( R_EAX, R_MACH );
730 store_reg( R_EAX, Rn );
731 }
732 break;
733 case 0x1:
734 { /* STS MACL, Rn */
735 uint32_t Rn = ((ir>>8)&0xF);
736 load_spreg( R_EAX, R_MACL );
737 store_reg( R_EAX, Rn );
738 }
739 break;
740 case 0x2:
741 { /* STS PR, Rn */
742 uint32_t Rn = ((ir>>8)&0xF);
743 load_spreg( R_EAX, R_PR );
744 store_reg( R_EAX, Rn );
745 }
746 break;
747 case 0x3:
748 { /* STC SGR, Rn */
749 uint32_t Rn = ((ir>>8)&0xF);
750 check_priv();
751 load_spreg( R_EAX, R_SGR );
752 store_reg( R_EAX, Rn );
753 }
754 break;
755 case 0x5:
756 { /* STS FPUL, Rn */
757 uint32_t Rn = ((ir>>8)&0xF);
758 load_spreg( R_EAX, R_FPUL );
759 store_reg( R_EAX, Rn );
760 }
761 break;
762 case 0x6:
763 { /* STS FPSCR, Rn */
764 uint32_t Rn = ((ir>>8)&0xF);
765 load_spreg( R_EAX, R_FPSCR );
766 store_reg( R_EAX, Rn );
767 }
768 break;
769 case 0xF:
770 { /* STC DBR, Rn */
771 uint32_t Rn = ((ir>>8)&0xF);
772 check_priv();
773 load_spreg( R_EAX, R_DBR );
774 store_reg( R_EAX, Rn );
775 }
776 break;
777 default:
778 UNDEF();
779 break;
780 }
781 break;
782 case 0xB:
783 switch( (ir&0xFF0) >> 4 ) {
784 case 0x0:
785 { /* RTS */
786 if( sh4_x86.in_delay_slot ) {
787 SLOTILLEGAL();
788 } else {
789 load_spreg( R_EDI, R_PR );
790 sh4_x86.in_delay_slot = TRUE;
791 return 0;
792 }
793 }
794 break;
795 case 0x1:
796 { /* SLEEP */
797 check_priv();
798 call_func0( sh4_sleep );
799 sh4_x86.exit_code = 0;
800 sh4_x86.in_delay_slot = FALSE;
801 INC_r32(R_ESI);
802 return 1;
803 }
804 break;
805 case 0x2:
806 { /* RTE */
807 check_priv();
808 if( sh4_x86.in_delay_slot ) {
809 SLOTILLEGAL();
810 } else {
811 load_spreg( R_EDI, R_SPC );
812 load_spreg( R_EAX, R_SSR );
813 call_func1( sh4_write_sr, R_EAX );
814 sh4_x86.in_delay_slot = TRUE;
815 sh4_x86.priv_checked = FALSE;
816 sh4_x86.fpuen_checked = FALSE;
817 return 0;
818 }
819 }
820 break;
821 default:
822 UNDEF();
823 break;
824 }
825 break;
826 case 0xC:
827 { /* MOV.B @(R0, Rm), Rn */
828 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
829 load_reg( R_EAX, 0 );
830 load_reg( R_ECX, Rm );
831 ADD_r32_r32( R_EAX, R_ECX );
832 MEM_READ_BYTE( R_ECX, R_EAX );
833 store_reg( R_EAX, Rn );
834 }
835 break;
836 case 0xD:
837 { /* MOV.W @(R0, Rm), Rn */
838 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
839 load_reg( R_EAX, 0 );
840 load_reg( R_ECX, Rm );
841 ADD_r32_r32( R_EAX, R_ECX );
842 check_ralign16( R_ECX );
843 MEM_READ_WORD( R_ECX, R_EAX );
844 store_reg( R_EAX, Rn );
845 }
846 break;
847 case 0xE:
848 { /* MOV.L @(R0, Rm), Rn */
849 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
850 load_reg( R_EAX, 0 );
851 load_reg( R_ECX, Rm );
852 ADD_r32_r32( R_EAX, R_ECX );
853 check_ralign32( R_ECX );
854 MEM_READ_LONG( R_ECX, R_EAX );
855 store_reg( R_EAX, Rn );
856 }
857 break;
858 case 0xF:
859 { /* MAC.L @Rm+, @Rn+ */
860 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
861 load_reg( R_ECX, Rm );
862 check_ralign32( R_ECX );
863 load_reg( R_ECX, Rn );
864 check_ralign32( R_ECX );
865 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
866 MEM_READ_LONG( R_ECX, R_EAX );
867 PUSH_r32( R_EAX );
868 load_reg( R_ECX, Rm );
869 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
870 MEM_READ_LONG( R_ECX, R_EAX );
871 POP_r32( R_ECX );
872 IMUL_r32( R_ECX );
873 ADD_r32_sh4r( R_EAX, R_MACL );
874 ADC_r32_sh4r( R_EDX, R_MACH );
876 load_spreg( R_ECX, R_S );
877 TEST_r32_r32(R_ECX, R_ECX);
878 JE_rel8( 7, nosat );
879 call_func0( signsat48 );
880 JMP_TARGET( nosat );
881 }
882 break;
883 default:
884 UNDEF();
885 break;
886 }
887 break;
888 case 0x1:
889 { /* MOV.L Rm, @(disp, Rn) */
890 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<2;
891 load_reg( R_ECX, Rn );
892 load_reg( R_EAX, Rm );
893 ADD_imm32_r32( disp, R_ECX );
894 check_walign32( R_ECX );
895 MEM_WRITE_LONG( R_ECX, R_EAX );
896 }
897 break;
898 case 0x2:
899 switch( ir&0xF ) {
900 case 0x0:
901 { /* MOV.B Rm, @Rn */
902 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
903 load_reg( R_EAX, Rm );
904 load_reg( R_ECX, Rn );
905 MEM_WRITE_BYTE( R_ECX, R_EAX );
906 }
907 break;
908 case 0x1:
909 { /* MOV.W Rm, @Rn */
910 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
911 load_reg( R_ECX, Rn );
912 check_walign16( R_ECX );
913 load_reg( R_EAX, Rm );
914 MEM_WRITE_WORD( R_ECX, R_EAX );
915 }
916 break;
917 case 0x2:
918 { /* MOV.L Rm, @Rn */
919 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
920 load_reg( R_EAX, Rm );
921 load_reg( R_ECX, Rn );
922 check_walign32(R_ECX);
923 MEM_WRITE_LONG( R_ECX, R_EAX );
924 }
925 break;
926 case 0x4:
927 { /* MOV.B Rm, @-Rn */
928 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
929 load_reg( R_EAX, Rm );
930 load_reg( R_ECX, Rn );
931 ADD_imm8s_r32( -1, R_ECX );
932 store_reg( R_ECX, Rn );
933 MEM_WRITE_BYTE( R_ECX, R_EAX );
934 }
935 break;
936 case 0x5:
937 { /* MOV.W Rm, @-Rn */
938 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
939 load_reg( R_ECX, Rn );
940 check_walign16( R_ECX );
941 load_reg( R_EAX, Rm );
942 ADD_imm8s_r32( -2, R_ECX );
943 store_reg( R_ECX, Rn );
944 MEM_WRITE_WORD( R_ECX, R_EAX );
945 }
946 break;
947 case 0x6:
948 { /* MOV.L Rm, @-Rn */
949 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
950 load_reg( R_EAX, Rm );
951 load_reg( R_ECX, Rn );
952 check_walign32( R_ECX );
953 ADD_imm8s_r32( -4, R_ECX );
954 store_reg( R_ECX, Rn );
955 MEM_WRITE_LONG( R_ECX, R_EAX );
956 }
957 break;
958 case 0x7:
959 { /* DIV0S Rm, Rn */
960 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
961 load_reg( R_EAX, Rm );
962 load_reg( R_ECX, Rn );
963 SHR_imm8_r32( 31, R_EAX );
964 SHR_imm8_r32( 31, R_ECX );
965 store_spreg( R_EAX, R_M );
966 store_spreg( R_ECX, R_Q );
967 CMP_r32_r32( R_EAX, R_ECX );
968 SETNE_t();
969 }
970 break;
971 case 0x8:
972 { /* TST Rm, Rn */
973 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
974 load_reg( R_EAX, Rm );
975 load_reg( R_ECX, Rn );
976 TEST_r32_r32( R_EAX, R_ECX );
977 SETE_t();
978 }
979 break;
980 case 0x9:
981 { /* AND Rm, Rn */
982 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
983 load_reg( R_EAX, Rm );
984 load_reg( R_ECX, Rn );
985 AND_r32_r32( R_EAX, R_ECX );
986 store_reg( R_ECX, Rn );
987 }
988 break;
989 case 0xA:
990 { /* XOR Rm, Rn */
991 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
992 load_reg( R_EAX, Rm );
993 load_reg( R_ECX, Rn );
994 XOR_r32_r32( R_EAX, R_ECX );
995 store_reg( R_ECX, Rn );
996 }
997 break;
998 case 0xB:
999 { /* OR Rm, Rn */
1000 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1001 load_reg( R_EAX, Rm );
1002 load_reg( R_ECX, Rn );
1003 OR_r32_r32( R_EAX, R_ECX );
1004 store_reg( R_ECX, Rn );
1005 }
1006 break;
1007 case 0xC:
1008 { /* CMP/STR Rm, Rn */
1009 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1010 load_reg( R_EAX, Rm );
1011 load_reg( R_ECX, Rn );
1012 XOR_r32_r32( R_ECX, R_EAX );
1013 TEST_r8_r8( R_AL, R_AL );
1014 JE_rel8(13, target1);
1015 TEST_r8_r8( R_AH, R_AH ); // 2
1016 JE_rel8(9, target2);
1017 SHR_imm8_r32( 16, R_EAX ); // 3
1018 TEST_r8_r8( R_AL, R_AL ); // 2
1019 JE_rel8(2, target3);
1020 TEST_r8_r8( R_AH, R_AH ); // 2
1021 JMP_TARGET(target1);
1022 JMP_TARGET(target2);
1023 JMP_TARGET(target3);
1024 SETE_t();
1025 }
1026 break;
1027 case 0xD:
1028 { /* XTRCT Rm, Rn */
1029 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1030 load_reg( R_EAX, Rm );
1031 load_reg( R_ECX, Rn );
1032 SHL_imm8_r32( 16, R_EAX );
1033 SHR_imm8_r32( 16, R_ECX );
1034 OR_r32_r32( R_EAX, R_ECX );
1035 store_reg( R_ECX, Rn );
1036 }
1037 break;
1038 case 0xE:
1039 { /* MULU.W Rm, Rn */
1040 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1041 load_reg16u( R_EAX, Rm );
1042 load_reg16u( R_ECX, Rn );
1043 MUL_r32( R_ECX );
1044 store_spreg( R_EAX, R_MACL );
1045 }
1046 break;
1047 case 0xF:
1048 { /* MULS.W Rm, Rn */
1049 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1050 load_reg16s( R_EAX, Rm );
1051 load_reg16s( R_ECX, Rn );
1052 MUL_r32( R_ECX );
1053 store_spreg( R_EAX, R_MACL );
1054 }
1055 break;
1056 default:
1057 UNDEF();
1058 break;
1059 }
1060 break;
1061 case 0x3:
1062 switch( ir&0xF ) {
1063 case 0x0:
1064 { /* CMP/EQ Rm, Rn */
1065 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1066 load_reg( R_EAX, Rm );
1067 load_reg( R_ECX, Rn );
1068 CMP_r32_r32( R_EAX, R_ECX );
1069 SETE_t();
1070 }
1071 break;
1072 case 0x2:
1073 { /* CMP/HS Rm, Rn */
1074 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1075 load_reg( R_EAX, Rm );
1076 load_reg( R_ECX, Rn );
1077 CMP_r32_r32( R_EAX, R_ECX );
1078 SETAE_t();
1079 }
1080 break;
1081 case 0x3:
1082 { /* CMP/GE Rm, Rn */
1083 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1084 load_reg( R_EAX, Rm );
1085 load_reg( R_ECX, Rn );
1086 CMP_r32_r32( R_EAX, R_ECX );
1087 SETGE_t();
1088 }
1089 break;
1090 case 0x4:
1091 { /* DIV1 Rm, Rn */
1092 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1093 load_spreg( R_ECX, R_M );
1094 load_reg( R_EAX, Rn );
1095 LDC_t();
1096 RCL1_r32( R_EAX );
1097 SETC_r8( R_DL ); // Q'
1098 CMP_sh4r_r32( R_Q, R_ECX );
1099 JE_rel8(5, mqequal);
1100 ADD_sh4r_r32( REG_OFFSET(r[Rm]), R_EAX );
1101 JMP_rel8(3, end);
1102 JMP_TARGET(mqequal);
1103 SUB_sh4r_r32( REG_OFFSET(r[Rm]), R_EAX );
1104 JMP_TARGET(end);
1105 store_reg( R_EAX, Rn ); // Done with Rn now
1106 SETC_r8(R_AL); // tmp1
1107 XOR_r8_r8( R_DL, R_AL ); // Q' = Q ^ tmp1
1108 XOR_r8_r8( R_AL, R_CL ); // Q'' = Q' ^ M
1109 store_spreg( R_ECX, R_Q );
1110 XOR_imm8s_r32( 1, R_AL ); // T = !Q'
1111 MOVZX_r8_r32( R_AL, R_EAX );
1112 store_spreg( R_EAX, R_T );
1113 }
1114 break;
1115 case 0x5:
1116 { /* DMULU.L Rm, Rn */
1117 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1118 load_reg( R_EAX, Rm );
1119 load_reg( R_ECX, Rn );
1120 MUL_r32(R_ECX);
1121 store_spreg( R_EDX, R_MACH );
1122 store_spreg( R_EAX, R_MACL );
1123 }
1124 break;
1125 case 0x6:
1126 { /* CMP/HI Rm, Rn */
1127 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1128 load_reg( R_EAX, Rm );
1129 load_reg( R_ECX, Rn );
1130 CMP_r32_r32( R_EAX, R_ECX );
1131 SETA_t();
1132 }
1133 break;
1134 case 0x7:
1135 { /* CMP/GT Rm, Rn */
1136 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1137 load_reg( R_EAX, Rm );
1138 load_reg( R_ECX, Rn );
1139 CMP_r32_r32( R_EAX, R_ECX );
1140 SETG_t();
1141 }
1142 break;
1143 case 0x8:
1144 { /* SUB Rm, Rn */
1145 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1146 load_reg( R_EAX, Rm );
1147 load_reg( R_ECX, Rn );
1148 SUB_r32_r32( R_EAX, R_ECX );
1149 store_reg( R_ECX, Rn );
1150 }
1151 break;
1152 case 0xA:
1153 { /* SUBC Rm, Rn */
1154 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1155 load_reg( R_EAX, Rm );
1156 load_reg( R_ECX, Rn );
1157 LDC_t();
1158 SBB_r32_r32( R_EAX, R_ECX );
1159 store_reg( R_ECX, Rn );
1160 SETC_t();
1161 }
1162 break;
1163 case 0xB:
1164 { /* SUBV Rm, Rn */
1165 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1166 load_reg( R_EAX, Rm );
1167 load_reg( R_ECX, Rn );
1168 SUB_r32_r32( R_EAX, R_ECX );
1169 store_reg( R_ECX, Rn );
1170 SETO_t();
1171 }
1172 break;
1173 case 0xC:
1174 { /* ADD Rm, Rn */
1175 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1176 load_reg( R_EAX, Rm );
1177 load_reg( R_ECX, Rn );
1178 ADD_r32_r32( R_EAX, R_ECX );
1179 store_reg( R_ECX, Rn );
1180 }
1181 break;
1182 case 0xD:
1183 { /* DMULS.L Rm, Rn */
1184 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1185 load_reg( R_EAX, Rm );
1186 load_reg( R_ECX, Rn );
1187 IMUL_r32(R_ECX);
1188 store_spreg( R_EDX, R_MACH );
1189 store_spreg( R_EAX, R_MACL );
1190 }
1191 break;
1192 case 0xE:
1193 { /* ADDC Rm, Rn */
1194 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1195 load_reg( R_EAX, Rm );
1196 load_reg( R_ECX, Rn );
1197 LDC_t();
1198 ADC_r32_r32( R_EAX, R_ECX );
1199 store_reg( R_ECX, Rn );
1200 SETC_t();
1201 }
1202 break;
1203 case 0xF:
1204 { /* ADDV Rm, Rn */
1205 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1206 load_reg( R_EAX, Rm );
1207 load_reg( R_ECX, Rn );
1208 ADD_r32_r32( R_EAX, R_ECX );
1209 store_reg( R_ECX, Rn );
1210 SETO_t();
1211 }
1212 break;
1213 default:
1214 UNDEF();
1215 break;
1216 }
1217 break;
1218 case 0x4:
1219 switch( ir&0xF ) {
1220 case 0x0:
1221 switch( (ir&0xF0) >> 4 ) {
1222 case 0x0:
1223 { /* SHLL Rn */
1224 uint32_t Rn = ((ir>>8)&0xF);
1225 load_reg( R_EAX, Rn );
1226 SHL1_r32( R_EAX );
1227 SETC_t();
1228 store_reg( R_EAX, Rn );
1229 }
1230 break;
1231 case 0x1:
1232 { /* DT Rn */
1233 uint32_t Rn = ((ir>>8)&0xF);
1234 load_reg( R_EAX, Rn );
1235 ADD_imm8s_r32( -1, R_EAX );
1236 store_reg( R_EAX, Rn );
1237 SETE_t();
1238 }
1239 break;
1240 case 0x2:
1241 { /* SHAL Rn */
1242 uint32_t Rn = ((ir>>8)&0xF);
1243 load_reg( R_EAX, Rn );
1244 SHL1_r32( R_EAX );
1245 SETC_t();
1246 store_reg( R_EAX, Rn );
1247 }
1248 break;
1249 default:
1250 UNDEF();
1251 break;
1252 }
1253 break;
1254 case 0x1:
1255 switch( (ir&0xF0) >> 4 ) {
1256 case 0x0:
1257 { /* SHLR Rn */
1258 uint32_t Rn = ((ir>>8)&0xF);
1259 load_reg( R_EAX, Rn );
1260 SHR1_r32( R_EAX );
1261 SETC_t();
1262 store_reg( R_EAX, Rn );
1263 }
1264 break;
1265 case 0x1:
1266 { /* CMP/PZ Rn */
1267 uint32_t Rn = ((ir>>8)&0xF);
1268 load_reg( R_EAX, Rn );
1269 CMP_imm8s_r32( 0, R_EAX );
1270 SETGE_t();
1271 }
1272 break;
1273 case 0x2:
1274 { /* SHAR Rn */
1275 uint32_t Rn = ((ir>>8)&0xF);
1276 load_reg( R_EAX, Rn );
1277 SAR1_r32( R_EAX );
1278 SETC_t();
1279 store_reg( R_EAX, Rn );
1280 }
1281 break;
1282 default:
1283 UNDEF();
1284 break;
1285 }
1286 break;
1287 case 0x2:
1288 switch( (ir&0xF0) >> 4 ) {
1289 case 0x0:
1290 { /* STS.L MACH, @-Rn */
1291 uint32_t Rn = ((ir>>8)&0xF);
1292 load_reg( R_ECX, Rn );
1293 check_walign32( R_ECX );
1294 ADD_imm8s_r32( -4, R_ECX );
1295 store_reg( R_ECX, Rn );
1296 load_spreg( R_EAX, R_MACH );
1297 MEM_WRITE_LONG( R_ECX, R_EAX );
1298 }
1299 break;
1300 case 0x1:
1301 { /* STS.L MACL, @-Rn */
1302 uint32_t Rn = ((ir>>8)&0xF);
1303 load_reg( R_ECX, Rn );
1304 check_walign32( R_ECX );
1305 ADD_imm8s_r32( -4, R_ECX );
1306 store_reg( R_ECX, Rn );
1307 load_spreg( R_EAX, R_MACL );
1308 MEM_WRITE_LONG( R_ECX, R_EAX );
1309 }
1310 break;
1311 case 0x2:
1312 { /* STS.L PR, @-Rn */
1313 uint32_t Rn = ((ir>>8)&0xF);
1314 load_reg( R_ECX, Rn );
1315 check_walign32( R_ECX );
1316 ADD_imm8s_r32( -4, R_ECX );
1317 store_reg( R_ECX, Rn );
1318 load_spreg( R_EAX, R_PR );
1319 MEM_WRITE_LONG( R_ECX, R_EAX );
1320 }
1321 break;
1322 case 0x3:
1323 { /* STC.L SGR, @-Rn */
1324 uint32_t Rn = ((ir>>8)&0xF);
1325 check_priv();
1326 load_reg( R_ECX, Rn );
1327 check_walign32( R_ECX );
1328 ADD_imm8s_r32( -4, R_ECX );
1329 store_reg( R_ECX, Rn );
1330 load_spreg( R_EAX, R_SGR );
1331 MEM_WRITE_LONG( R_ECX, R_EAX );
1332 }
1333 break;
1334 case 0x5:
1335 { /* STS.L FPUL, @-Rn */
1336 uint32_t Rn = ((ir>>8)&0xF);
1337 load_reg( R_ECX, Rn );
1338 check_walign32( R_ECX );
1339 ADD_imm8s_r32( -4, R_ECX );
1340 store_reg( R_ECX, Rn );
1341 load_spreg( R_EAX, R_FPUL );
1342 MEM_WRITE_LONG( R_ECX, R_EAX );
1343 }
1344 break;
1345 case 0x6:
1346 { /* STS.L FPSCR, @-Rn */
1347 uint32_t Rn = ((ir>>8)&0xF);
1348 load_reg( R_ECX, Rn );
1349 check_walign32( R_ECX );
1350 ADD_imm8s_r32( -4, R_ECX );
1351 store_reg( R_ECX, Rn );
1352 load_spreg( R_EAX, R_FPSCR );
1353 MEM_WRITE_LONG( R_ECX, R_EAX );
1354 }
1355 break;
1356 case 0xF:
1357 { /* STC.L DBR, @-Rn */
1358 uint32_t Rn = ((ir>>8)&0xF);
1359 check_priv();
1360 load_reg( R_ECX, Rn );
1361 check_walign32( R_ECX );
1362 ADD_imm8s_r32( -4, R_ECX );
1363 store_reg( R_ECX, Rn );
1364 load_spreg( R_EAX, R_DBR );
1365 MEM_WRITE_LONG( R_ECX, R_EAX );
1366 }
1367 break;
1368 default:
1369 UNDEF();
1370 break;
1371 }
1372 break;
1373 case 0x3:
1374 switch( (ir&0x80) >> 7 ) {
1375 case 0x0:
1376 switch( (ir&0x70) >> 4 ) {
1377 case 0x0:
1378 { /* STC.L SR, @-Rn */
1379 uint32_t Rn = ((ir>>8)&0xF);
1380 check_priv();
1381 call_func0( sh4_read_sr );
1382 load_reg( R_ECX, Rn );
1383 check_walign32( R_ECX );
1384 ADD_imm8s_r32( -4, R_ECX );
1385 store_reg( R_ECX, Rn );
1386 MEM_WRITE_LONG( R_ECX, R_EAX );
1387 }
1388 break;
1389 case 0x1:
1390 { /* STC.L GBR, @-Rn */
1391 uint32_t Rn = ((ir>>8)&0xF);
1392 load_reg( R_ECX, Rn );
1393 check_walign32( R_ECX );
1394 ADD_imm8s_r32( -4, R_ECX );
1395 store_reg( R_ECX, Rn );
1396 load_spreg( R_EAX, R_GBR );
1397 MEM_WRITE_LONG( R_ECX, R_EAX );
1398 }
1399 break;
1400 case 0x2:
1401 { /* STC.L VBR, @-Rn */
1402 uint32_t Rn = ((ir>>8)&0xF);
1403 check_priv();
1404 load_reg( R_ECX, Rn );
1405 check_walign32( R_ECX );
1406 ADD_imm8s_r32( -4, R_ECX );
1407 store_reg( R_ECX, Rn );
1408 load_spreg( R_EAX, R_VBR );
1409 MEM_WRITE_LONG( R_ECX, R_EAX );
1410 }
1411 break;
1412 case 0x3:
1413 { /* STC.L SSR, @-Rn */
1414 uint32_t Rn = ((ir>>8)&0xF);
1415 check_priv();
1416 load_reg( R_ECX, Rn );
1417 check_walign32( R_ECX );
1418 ADD_imm8s_r32( -4, R_ECX );
1419 store_reg( R_ECX, Rn );
1420 load_spreg( R_EAX, R_SSR );
1421 MEM_WRITE_LONG( R_ECX, R_EAX );
1422 }
1423 break;
1424 case 0x4:
1425 { /* STC.L SPC, @-Rn */
1426 uint32_t Rn = ((ir>>8)&0xF);
1427 check_priv();
1428 load_reg( R_ECX, Rn );
1429 check_walign32( R_ECX );
1430 ADD_imm8s_r32( -4, R_ECX );
1431 store_reg( R_ECX, Rn );
1432 load_spreg( R_EAX, R_SPC );
1433 MEM_WRITE_LONG( R_ECX, R_EAX );
1434 }
1435 break;
1436 default:
1437 UNDEF();
1438 break;
1439 }
1440 break;
1441 case 0x1:
1442 { /* STC.L Rm_BANK, @-Rn */
1443 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm_BANK = ((ir>>4)&0x7);
1444 check_priv();
1445 load_reg( R_ECX, Rn );
1446 check_walign32( R_ECX );
1447 ADD_imm8s_r32( -4, R_ECX );
1448 store_reg( R_ECX, Rn );
1449 load_spreg( R_EAX, REG_OFFSET(r_bank[Rm_BANK]) );
1450 MEM_WRITE_LONG( R_ECX, R_EAX );
1451 }
1452 break;
1453 }
1454 break;
1455 case 0x4:
1456 switch( (ir&0xF0) >> 4 ) {
1457 case 0x0:
1458 { /* ROTL Rn */
1459 uint32_t Rn = ((ir>>8)&0xF);
1460 load_reg( R_EAX, Rn );
1461 ROL1_r32( R_EAX );
1462 store_reg( R_EAX, Rn );
1463 SETC_t();
1464 }
1465 break;
1466 case 0x2:
1467 { /* ROTCL Rn */
1468 uint32_t Rn = ((ir>>8)&0xF);
1469 load_reg( R_EAX, Rn );
1470 LDC_t();
1471 RCL1_r32( R_EAX );
1472 store_reg( R_EAX, Rn );
1473 SETC_t();
1474 }
1475 break;
1476 default:
1477 UNDEF();
1478 break;
1479 }
1480 break;
1481 case 0x5:
1482 switch( (ir&0xF0) >> 4 ) {
1483 case 0x0:
1484 { /* ROTR Rn */
1485 uint32_t Rn = ((ir>>8)&0xF);
1486 load_reg( R_EAX, Rn );
1487 ROR1_r32( R_EAX );
1488 store_reg( R_EAX, Rn );
1489 SETC_t();
1490 }
1491 break;
1492 case 0x1:
1493 { /* CMP/PL Rn */
1494 uint32_t Rn = ((ir>>8)&0xF);
1495 load_reg( R_EAX, Rn );
1496 CMP_imm8s_r32( 0, R_EAX );
1497 SETG_t();
1498 }
1499 break;
1500 case 0x2:
1501 { /* ROTCR Rn */
1502 uint32_t Rn = ((ir>>8)&0xF);
1503 load_reg( R_EAX, Rn );
1504 LDC_t();
1505 RCR1_r32( R_EAX );
1506 store_reg( R_EAX, Rn );
1507 SETC_t();
1508 }
1509 break;
1510 default:
1511 UNDEF();
1512 break;
1513 }
1514 break;
1515 case 0x6:
1516 switch( (ir&0xF0) >> 4 ) {
1517 case 0x0:
1518 { /* LDS.L @Rm+, MACH */
1519 uint32_t Rm = ((ir>>8)&0xF);
1520 load_reg( R_EAX, Rm );
1521 check_ralign32( R_EAX );
1522 MOV_r32_r32( R_EAX, R_ECX );
1523 ADD_imm8s_r32( 4, R_EAX );
1524 store_reg( R_EAX, Rm );
1525 MEM_READ_LONG( R_ECX, R_EAX );
1526 store_spreg( R_EAX, R_MACH );
1527 }
1528 break;
1529 case 0x1:
1530 { /* LDS.L @Rm+, MACL */
1531 uint32_t Rm = ((ir>>8)&0xF);
1532 load_reg( R_EAX, Rm );
1533 check_ralign32( R_EAX );
1534 MOV_r32_r32( R_EAX, R_ECX );
1535 ADD_imm8s_r32( 4, R_EAX );
1536 store_reg( R_EAX, Rm );
1537 MEM_READ_LONG( R_ECX, R_EAX );
1538 store_spreg( R_EAX, R_MACL );
1539 }
1540 break;
1541 case 0x2:
1542 { /* LDS.L @Rm+, PR */
1543 uint32_t Rm = ((ir>>8)&0xF);
1544 load_reg( R_EAX, Rm );
1545 check_ralign32( R_EAX );
1546 MOV_r32_r32( R_EAX, R_ECX );
1547 ADD_imm8s_r32( 4, R_EAX );
1548 store_reg( R_EAX, Rm );
1549 MEM_READ_LONG( R_ECX, R_EAX );
1550 store_spreg( R_EAX, R_PR );
1551 }
1552 break;
1553 case 0x3:
1554 { /* LDC.L @Rm+, SGR */
1555 uint32_t Rm = ((ir>>8)&0xF);
1556 check_priv();
1557 load_reg( R_EAX, Rm );
1558 check_ralign32( R_EAX );
1559 MOV_r32_r32( R_EAX, R_ECX );
1560 ADD_imm8s_r32( 4, R_EAX );
1561 store_reg( R_EAX, Rm );
1562 MEM_READ_LONG( R_ECX, R_EAX );
1563 store_spreg( R_EAX, R_SGR );
1564 }
1565 break;
1566 case 0x5:
1567 { /* LDS.L @Rm+, FPUL */
1568 uint32_t Rm = ((ir>>8)&0xF);
1569 load_reg( R_EAX, Rm );
1570 check_ralign32( R_EAX );
1571 MOV_r32_r32( R_EAX, R_ECX );
1572 ADD_imm8s_r32( 4, R_EAX );
1573 store_reg( R_EAX, Rm );
1574 MEM_READ_LONG( R_ECX, R_EAX );
1575 store_spreg( R_EAX, R_FPUL );
1576 }
1577 break;
1578 case 0x6:
1579 { /* LDS.L @Rm+, FPSCR */
1580 uint32_t Rm = ((ir>>8)&0xF);
1581 load_reg( R_EAX, Rm );
1582 check_ralign32( R_EAX );
1583 MOV_r32_r32( R_EAX, R_ECX );
1584 ADD_imm8s_r32( 4, R_EAX );
1585 store_reg( R_EAX, Rm );
1586 MEM_READ_LONG( R_ECX, R_EAX );
1587 store_spreg( R_EAX, R_FPSCR );
1588 update_fr_bank( R_EAX );
1589 }
1590 break;
1591 case 0xF:
1592 { /* LDC.L @Rm+, DBR */
1593 uint32_t Rm = ((ir>>8)&0xF);
1594 check_priv();
1595 load_reg( R_EAX, Rm );
1596 check_ralign32( R_EAX );
1597 MOV_r32_r32( R_EAX, R_ECX );
1598 ADD_imm8s_r32( 4, R_EAX );
1599 store_reg( R_EAX, Rm );
1600 MEM_READ_LONG( R_ECX, R_EAX );
1601 store_spreg( R_EAX, R_DBR );
1602 }
1603 break;
1604 default:
1605 UNDEF();
1606 break;
1607 }
1608 break;
1609 case 0x7:
1610 switch( (ir&0x80) >> 7 ) {
1611 case 0x0:
1612 switch( (ir&0x70) >> 4 ) {
1613 case 0x0:
1614 { /* LDC.L @Rm+, SR */
1615 uint32_t Rm = ((ir>>8)&0xF);
1616 if( sh4_x86.in_delay_slot ) {
1617 SLOTILLEGAL();
1618 } else {
1619 check_priv();
1620 load_reg( R_EAX, Rm );
1621 check_ralign32( R_EAX );
1622 MOV_r32_r32( R_EAX, R_ECX );
1623 ADD_imm8s_r32( 4, R_EAX );
1624 store_reg( R_EAX, Rm );
1625 MEM_READ_LONG( R_ECX, R_EAX );
1626 call_func1( sh4_write_sr, R_EAX );
1627 sh4_x86.priv_checked = FALSE;
1628 sh4_x86.fpuen_checked = FALSE;
1629 }
1630 }
1631 break;
1632 case 0x1:
1633 { /* LDC.L @Rm+, GBR */
1634 uint32_t Rm = ((ir>>8)&0xF);
1635 load_reg( R_EAX, Rm );
1636 check_ralign32( R_EAX );
1637 MOV_r32_r32( R_EAX, R_ECX );
1638 ADD_imm8s_r32( 4, R_EAX );
1639 store_reg( R_EAX, Rm );
1640 MEM_READ_LONG( R_ECX, R_EAX );
1641 store_spreg( R_EAX, R_GBR );
1642 }
1643 break;
1644 case 0x2:
1645 { /* LDC.L @Rm+, VBR */
1646 uint32_t Rm = ((ir>>8)&0xF);
1647 check_priv();
1648 load_reg( R_EAX, Rm );
1649 check_ralign32( R_EAX );
1650 MOV_r32_r32( R_EAX, R_ECX );
1651 ADD_imm8s_r32( 4, R_EAX );
1652 store_reg( R_EAX, Rm );
1653 MEM_READ_LONG( R_ECX, R_EAX );
1654 store_spreg( R_EAX, R_VBR );
1655 }
1656 break;
1657 case 0x3:
1658 { /* LDC.L @Rm+, SSR */
1659 uint32_t Rm = ((ir>>8)&0xF);
1660 check_priv();
1661 load_reg( R_EAX, Rm );
1662 MOV_r32_r32( R_EAX, R_ECX );
1663 ADD_imm8s_r32( 4, R_EAX );
1664 store_reg( R_EAX, Rm );
1665 MEM_READ_LONG( R_ECX, R_EAX );
1666 store_spreg( R_EAX, R_SSR );
1667 }
1668 break;
1669 case 0x4:
1670 { /* LDC.L @Rm+, SPC */
1671 uint32_t Rm = ((ir>>8)&0xF);
1672 check_priv();
1673 load_reg( R_EAX, Rm );
1674 check_ralign32( R_EAX );
1675 MOV_r32_r32( R_EAX, R_ECX );
1676 ADD_imm8s_r32( 4, R_EAX );
1677 store_reg( R_EAX, Rm );
1678 MEM_READ_LONG( R_ECX, R_EAX );
1679 store_spreg( R_EAX, R_SPC );
1680 }
1681 break;
1682 default:
1683 UNDEF();
1684 break;
1685 }
1686 break;
1687 case 0x1:
1688 { /* LDC.L @Rm+, Rn_BANK */
1689 uint32_t Rm = ((ir>>8)&0xF); uint32_t Rn_BANK = ((ir>>4)&0x7);
1690 check_priv();
1691 load_reg( R_EAX, Rm );
1692 check_ralign32( R_EAX );
1693 MOV_r32_r32( R_EAX, R_ECX );
1694 ADD_imm8s_r32( 4, R_EAX );
1695 store_reg( R_EAX, Rm );
1696 MEM_READ_LONG( R_ECX, R_EAX );
1697 store_spreg( R_EAX, REG_OFFSET(r_bank[Rn_BANK]) );
1698 }
1699 break;
1700 }
1701 break;
1702 case 0x8:
1703 switch( (ir&0xF0) >> 4 ) {
1704 case 0x0:
1705 { /* SHLL2 Rn */
1706 uint32_t Rn = ((ir>>8)&0xF);
1707 load_reg( R_EAX, Rn );
1708 SHL_imm8_r32( 2, R_EAX );
1709 store_reg( R_EAX, Rn );
1710 }
1711 break;
1712 case 0x1:
1713 { /* SHLL8 Rn */
1714 uint32_t Rn = ((ir>>8)&0xF);
1715 load_reg( R_EAX, Rn );
1716 SHL_imm8_r32( 8, R_EAX );
1717 store_reg( R_EAX, Rn );
1718 }
1719 break;
1720 case 0x2:
1721 { /* SHLL16 Rn */
1722 uint32_t Rn = ((ir>>8)&0xF);
1723 load_reg( R_EAX, Rn );
1724 SHL_imm8_r32( 16, R_EAX );
1725 store_reg( R_EAX, Rn );
1726 }
1727 break;
1728 default:
1729 UNDEF();
1730 break;
1731 }
1732 break;
1733 case 0x9:
1734 switch( (ir&0xF0) >> 4 ) {
1735 case 0x0:
1736 { /* SHLR2 Rn */
1737 uint32_t Rn = ((ir>>8)&0xF);
1738 load_reg( R_EAX, Rn );
1739 SHR_imm8_r32( 2, R_EAX );
1740 store_reg( R_EAX, Rn );
1741 }
1742 break;
1743 case 0x1:
1744 { /* SHLR8 Rn */
1745 uint32_t Rn = ((ir>>8)&0xF);
1746 load_reg( R_EAX, Rn );
1747 SHR_imm8_r32( 8, R_EAX );
1748 store_reg( R_EAX, Rn );
1749 }
1750 break;
1751 case 0x2:
1752 { /* SHLR16 Rn */
1753 uint32_t Rn = ((ir>>8)&0xF);
1754 load_reg( R_EAX, Rn );
1755 SHR_imm8_r32( 16, R_EAX );
1756 store_reg( R_EAX, Rn );
1757 }
1758 break;
1759 default:
1760 UNDEF();
1761 break;
1762 }
1763 break;
1764 case 0xA:
1765 switch( (ir&0xF0) >> 4 ) {
1766 case 0x0:
1767 { /* LDS Rm, MACH */
1768 uint32_t Rm = ((ir>>8)&0xF);
1769 load_reg( R_EAX, Rm );
1770 store_spreg( R_EAX, R_MACH );
1771 }
1772 break;
1773 case 0x1:
1774 { /* LDS Rm, MACL */
1775 uint32_t Rm = ((ir>>8)&0xF);
1776 load_reg( R_EAX, Rm );
1777 store_spreg( R_EAX, R_MACL );
1778 }
1779 break;
1780 case 0x2:
1781 { /* LDS Rm, PR */
1782 uint32_t Rm = ((ir>>8)&0xF);
1783 load_reg( R_EAX, Rm );
1784 store_spreg( R_EAX, R_PR );
1785 }
1786 break;
1787 case 0x3:
1788 { /* LDC Rm, SGR */
1789 uint32_t Rm = ((ir>>8)&0xF);
1790 check_priv();
1791 load_reg( R_EAX, Rm );
1792 store_spreg( R_EAX, R_SGR );
1793 }
1794 break;
1795 case 0x5:
1796 { /* LDS Rm, FPUL */
1797 uint32_t Rm = ((ir>>8)&0xF);
1798 load_reg( R_EAX, Rm );
1799 store_spreg( R_EAX, R_FPUL );
1800 }
1801 break;
1802 case 0x6:
1803 { /* LDS Rm, FPSCR */
1804 uint32_t Rm = ((ir>>8)&0xF);
1805 load_reg( R_EAX, Rm );
1806 store_spreg( R_EAX, R_FPSCR );
1807 update_fr_bank( R_EAX );
1808 }
1809 break;
1810 case 0xF:
1811 { /* LDC Rm, DBR */
1812 uint32_t Rm = ((ir>>8)&0xF);
1813 check_priv();
1814 load_reg( R_EAX, Rm );
1815 store_spreg( R_EAX, R_DBR );
1816 }
1817 break;
1818 default:
1819 UNDEF();
1820 break;
1821 }
1822 break;
1823 case 0xB:
1824 switch( (ir&0xF0) >> 4 ) {
1825 case 0x0:
1826 { /* JSR @Rn */
1827 uint32_t Rn = ((ir>>8)&0xF);
1828 if( sh4_x86.in_delay_slot ) {
1829 SLOTILLEGAL();
1830 } else {
1831 load_imm32( R_EAX, pc + 4 );
1832 store_spreg( R_EAX, R_PR );
1833 load_reg( R_EDI, Rn );
1834 sh4_x86.in_delay_slot = TRUE;
1835 return 0;
1836 }
1837 }
1838 break;
1839 case 0x1:
1840 { /* TAS.B @Rn */
1841 uint32_t Rn = ((ir>>8)&0xF);
1842 load_reg( R_ECX, Rn );
1843 MEM_READ_BYTE( R_ECX, R_EAX );
1844 TEST_r8_r8( R_AL, R_AL );
1845 SETE_t();
1846 OR_imm8_r8( 0x80, R_AL );
1847 load_reg( R_ECX, Rn );
1848 MEM_WRITE_BYTE( R_ECX, R_EAX );
1849 }
1850 break;
1851 case 0x2:
1852 { /* JMP @Rn */
1853 uint32_t Rn = ((ir>>8)&0xF);
1854 if( sh4_x86.in_delay_slot ) {
1855 SLOTILLEGAL();
1856 } else {
1857 load_reg( R_EDI, Rn );
1858 sh4_x86.in_delay_slot = TRUE;
1859 return 0;
1860 }
1861 }
1862 break;
1863 default:
1864 UNDEF();
1865 break;
1866 }
1867 break;
1868 case 0xC:
1869 { /* SHAD Rm, Rn */
1870 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1871 /* Annoyingly enough, not directly convertible */
1872 load_reg( R_EAX, Rn );
1873 load_reg( R_ECX, Rm );
1874 CMP_imm32_r32( 0, R_ECX );
1875 JGE_rel8(16, doshl);
1877 NEG_r32( R_ECX ); // 2
1878 AND_imm8_r8( 0x1F, R_CL ); // 3
1879 JE_rel8( 4, emptysar); // 2
1880 SAR_r32_CL( R_EAX ); // 2
1881 JMP_rel8(10, end); // 2
1883 JMP_TARGET(emptysar);
1884 SAR_imm8_r32(31, R_EAX ); // 3
1885 JMP_rel8(5, end2);
1887 JMP_TARGET(doshl);
1888 AND_imm8_r8( 0x1F, R_CL ); // 3
1889 SHL_r32_CL( R_EAX ); // 2
1890 JMP_TARGET(end);
1891 JMP_TARGET(end2);
1892 store_reg( R_EAX, Rn );
1893 }
1894 break;
1895 case 0xD:
1896 { /* SHLD Rm, Rn */
1897 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1898 load_reg( R_EAX, Rn );
1899 load_reg( R_ECX, Rm );
1900 CMP_imm32_r32( 0, R_ECX );
1901 JGE_rel8(15, doshl);
1903 NEG_r32( R_ECX ); // 2
1904 AND_imm8_r8( 0x1F, R_CL ); // 3
1905 JE_rel8( 4, emptyshr );
1906 SHR_r32_CL( R_EAX ); // 2
1907 JMP_rel8(9, end); // 2
1909 JMP_TARGET(emptyshr);
1910 XOR_r32_r32( R_EAX, R_EAX );
1911 JMP_rel8(5, end2);
1913 JMP_TARGET(doshl);
1914 AND_imm8_r8( 0x1F, R_CL ); // 3
1915 SHL_r32_CL( R_EAX ); // 2
1916 JMP_TARGET(end);
1917 JMP_TARGET(end2);
1918 store_reg( R_EAX, Rn );
1919 }
1920 break;
1921 case 0xE:
1922 switch( (ir&0x80) >> 7 ) {
1923 case 0x0:
1924 switch( (ir&0x70) >> 4 ) {
1925 case 0x0:
1926 { /* LDC Rm, SR */
1927 uint32_t Rm = ((ir>>8)&0xF);
1928 if( sh4_x86.in_delay_slot ) {
1929 SLOTILLEGAL();
1930 } else {
1931 check_priv();
1932 load_reg( R_EAX, Rm );
1933 call_func1( sh4_write_sr, R_EAX );
1934 sh4_x86.priv_checked = FALSE;
1935 sh4_x86.fpuen_checked = FALSE;
1936 }
1937 }
1938 break;
1939 case 0x1:
1940 { /* LDC Rm, GBR */
1941 uint32_t Rm = ((ir>>8)&0xF);
1942 load_reg( R_EAX, Rm );
1943 store_spreg( R_EAX, R_GBR );
1944 }
1945 break;
1946 case 0x2:
1947 { /* LDC Rm, VBR */
1948 uint32_t Rm = ((ir>>8)&0xF);
1949 check_priv();
1950 load_reg( R_EAX, Rm );
1951 store_spreg( R_EAX, R_VBR );
1952 }
1953 break;
1954 case 0x3:
1955 { /* LDC Rm, SSR */
1956 uint32_t Rm = ((ir>>8)&0xF);
1957 check_priv();
1958 load_reg( R_EAX, Rm );
1959 store_spreg( R_EAX, R_SSR );
1960 }
1961 break;
1962 case 0x4:
1963 { /* LDC Rm, SPC */
1964 uint32_t Rm = ((ir>>8)&0xF);
1965 check_priv();
1966 load_reg( R_EAX, Rm );
1967 store_spreg( R_EAX, R_SPC );
1968 }
1969 break;
1970 default:
1971 UNDEF();
1972 break;
1973 }
1974 break;
1975 case 0x1:
1976 { /* LDC Rm, Rn_BANK */
1977 uint32_t Rm = ((ir>>8)&0xF); uint32_t Rn_BANK = ((ir>>4)&0x7);
1978 check_priv();
1979 load_reg( R_EAX, Rm );
1980 store_spreg( R_EAX, REG_OFFSET(r_bank[Rn_BANK]) );
1981 }
1982 break;
1983 }
1984 break;
1985 case 0xF:
1986 { /* MAC.W @Rm+, @Rn+ */
1987 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
1988 load_reg( R_ECX, Rm );
1989 check_ralign16( R_ECX );
1990 load_reg( R_ECX, Rn );
1991 check_ralign16( R_ECX );
1992 ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rn]) );
1993 MEM_READ_WORD( R_ECX, R_EAX );
1994 PUSH_r32( R_EAX );
1995 load_reg( R_ECX, Rm );
1996 ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
1997 MEM_READ_WORD( R_ECX, R_EAX );
1998 POP_r32( R_ECX );
1999 IMUL_r32( R_ECX );
2001 load_spreg( R_ECX, R_S );
2002 TEST_r32_r32( R_ECX, R_ECX );
2003 JE_rel8( 47, nosat );
2005 ADD_r32_sh4r( R_EAX, R_MACL ); // 6
2006 JNO_rel8( 51, end ); // 2
2007 load_imm32( R_EDX, 1 ); // 5
2008 store_spreg( R_EDX, R_MACH ); // 6
2009 JS_rel8( 13, positive ); // 2
2010 load_imm32( R_EAX, 0x80000000 );// 5
2011 store_spreg( R_EAX, R_MACL ); // 6
2012 JMP_rel8( 25, end2 ); // 2
2014 JMP_TARGET(positive);
2015 load_imm32( R_EAX, 0x7FFFFFFF );// 5
2016 store_spreg( R_EAX, R_MACL ); // 6
2017 JMP_rel8( 12, end3); // 2
2019 JMP_TARGET(nosat);
2020 ADD_r32_sh4r( R_EAX, R_MACL ); // 6
2021 ADC_r32_sh4r( R_EDX, R_MACH ); // 6
2022 JMP_TARGET(end);
2023 JMP_TARGET(end2);
2024 JMP_TARGET(end3);
2025 }
2026 break;
2027 }
2028 break;
2029 case 0x5:
2030 { /* MOV.L @(disp, Rm), Rn */
2031 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<2;
2032 load_reg( R_ECX, Rm );
2033 ADD_imm8s_r32( disp, R_ECX );
2034 check_ralign32( R_ECX );
2035 MEM_READ_LONG( R_ECX, R_EAX );
2036 store_reg( R_EAX, Rn );
2037 }
2038 break;
2039 case 0x6:
2040 switch( ir&0xF ) {
2041 case 0x0:
2042 { /* MOV.B @Rm, Rn */
2043 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2044 load_reg( R_ECX, Rm );
2045 MEM_READ_BYTE( R_ECX, R_EAX );
2046 store_reg( R_EAX, Rn );
2047 }
2048 break;
2049 case 0x1:
2050 { /* MOV.W @Rm, Rn */
2051 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2052 load_reg( R_ECX, Rm );
2053 check_ralign16( R_ECX );
2054 MEM_READ_WORD( R_ECX, R_EAX );
2055 store_reg( R_EAX, Rn );
2056 }
2057 break;
2058 case 0x2:
2059 { /* MOV.L @Rm, Rn */
2060 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2061 load_reg( R_ECX, Rm );
2062 check_ralign32( R_ECX );
2063 MEM_READ_LONG( R_ECX, R_EAX );
2064 store_reg( R_EAX, Rn );
2065 }
2066 break;
2067 case 0x3:
2068 { /* MOV Rm, Rn */
2069 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2070 load_reg( R_EAX, Rm );
2071 store_reg( R_EAX, Rn );
2072 }
2073 break;
2074 case 0x4:
2075 { /* MOV.B @Rm+, Rn */
2076 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2077 load_reg( R_ECX, Rm );
2078 MOV_r32_r32( R_ECX, R_EAX );
2079 ADD_imm8s_r32( 1, R_EAX );
2080 store_reg( R_EAX, Rm );
2081 MEM_READ_BYTE( R_ECX, R_EAX );
2082 store_reg( R_EAX, Rn );
2083 }
2084 break;
2085 case 0x5:
2086 { /* MOV.W @Rm+, Rn */
2087 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2088 load_reg( R_EAX, Rm );
2089 check_ralign16( R_EAX );
2090 MOV_r32_r32( R_EAX, R_ECX );
2091 ADD_imm8s_r32( 2, R_EAX );
2092 store_reg( R_EAX, Rm );
2093 MEM_READ_WORD( R_ECX, R_EAX );
2094 store_reg( R_EAX, Rn );
2095 }
2096 break;
2097 case 0x6:
2098 { /* MOV.L @Rm+, Rn */
2099 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2100 load_reg( R_EAX, Rm );
2101 check_ralign32( R_EAX );
2102 MOV_r32_r32( R_EAX, R_ECX );
2103 ADD_imm8s_r32( 4, R_EAX );
2104 store_reg( R_EAX, Rm );
2105 MEM_READ_LONG( R_ECX, R_EAX );
2106 store_reg( R_EAX, Rn );
2107 }
2108 break;
2109 case 0x7:
2110 { /* NOT Rm, Rn */
2111 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2112 load_reg( R_EAX, Rm );
2113 NOT_r32( R_EAX );
2114 store_reg( R_EAX, Rn );
2115 }
2116 break;
2117 case 0x8:
2118 { /* SWAP.B Rm, Rn */
2119 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2120 load_reg( R_EAX, Rm );
2121 XCHG_r8_r8( R_AL, R_AH );
2122 store_reg( R_EAX, Rn );
2123 }
2124 break;
2125 case 0x9:
2126 { /* SWAP.W Rm, Rn */
2127 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2128 load_reg( R_EAX, Rm );
2129 MOV_r32_r32( R_EAX, R_ECX );
2130 SHL_imm8_r32( 16, R_ECX );
2131 SHR_imm8_r32( 16, R_EAX );
2132 OR_r32_r32( R_EAX, R_ECX );
2133 store_reg( R_ECX, Rn );
2134 }
2135 break;
2136 case 0xA:
2137 { /* NEGC Rm, Rn */
2138 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2139 load_reg( R_EAX, Rm );
2140 XOR_r32_r32( R_ECX, R_ECX );
2141 LDC_t();
2142 SBB_r32_r32( R_EAX, R_ECX );
2143 store_reg( R_ECX, Rn );
2144 SETC_t();
2145 }
2146 break;
2147 case 0xB:
2148 { /* NEG Rm, Rn */
2149 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2150 load_reg( R_EAX, Rm );
2151 NEG_r32( R_EAX );
2152 store_reg( R_EAX, Rn );
2153 }
2154 break;
2155 case 0xC:
2156 { /* EXTU.B Rm, Rn */
2157 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2158 load_reg( R_EAX, Rm );
2159 MOVZX_r8_r32( R_EAX, R_EAX );
2160 store_reg( R_EAX, Rn );
2161 }
2162 break;
2163 case 0xD:
2164 { /* EXTU.W Rm, Rn */
2165 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2166 load_reg( R_EAX, Rm );
2167 MOVZX_r16_r32( R_EAX, R_EAX );
2168 store_reg( R_EAX, Rn );
2169 }
2170 break;
2171 case 0xE:
2172 { /* EXTS.B Rm, Rn */
2173 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2174 load_reg( R_EAX, Rm );
2175 MOVSX_r8_r32( R_EAX, R_EAX );
2176 store_reg( R_EAX, Rn );
2177 }
2178 break;
2179 case 0xF:
2180 { /* EXTS.W Rm, Rn */
2181 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2182 load_reg( R_EAX, Rm );
2183 MOVSX_r16_r32( R_EAX, R_EAX );
2184 store_reg( R_EAX, Rn );
2185 }
2186 break;
2187 }
2188 break;
2189 case 0x7:
2190 { /* ADD #imm, Rn */
2191 uint32_t Rn = ((ir>>8)&0xF); int32_t imm = SIGNEXT8(ir&0xFF);
2192 load_reg( R_EAX, Rn );
2193 ADD_imm8s_r32( imm, R_EAX );
2194 store_reg( R_EAX, Rn );
2195 }
2196 break;
2197 case 0x8:
2198 switch( (ir&0xF00) >> 8 ) {
2199 case 0x0:
2200 { /* MOV.B R0, @(disp, Rn) */
2201 uint32_t Rn = ((ir>>4)&0xF); uint32_t disp = (ir&0xF);
2202 load_reg( R_EAX, 0 );
2203 load_reg( R_ECX, Rn );
2204 ADD_imm32_r32( disp, R_ECX );
2205 MEM_WRITE_BYTE( R_ECX, R_EAX );
2206 }
2207 break;
2208 case 0x1:
2209 { /* MOV.W R0, @(disp, Rn) */
2210 uint32_t Rn = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<1;
2211 load_reg( R_ECX, Rn );
2212 load_reg( R_EAX, 0 );
2213 ADD_imm32_r32( disp, R_ECX );
2214 check_walign16( R_ECX );
2215 MEM_WRITE_WORD( R_ECX, R_EAX );
2216 }
2217 break;
2218 case 0x4:
2219 { /* MOV.B @(disp, Rm), R0 */
2220 uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF);
2221 load_reg( R_ECX, Rm );
2222 ADD_imm32_r32( disp, R_ECX );
2223 MEM_READ_BYTE( R_ECX, R_EAX );
2224 store_reg( R_EAX, 0 );
2225 }
2226 break;
2227 case 0x5:
2228 { /* MOV.W @(disp, Rm), R0 */
2229 uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<1;
2230 load_reg( R_ECX, Rm );
2231 ADD_imm32_r32( disp, R_ECX );
2232 check_ralign16( R_ECX );
2233 MEM_READ_WORD( R_ECX, R_EAX );
2234 store_reg( R_EAX, 0 );
2235 }
2236 break;
2237 case 0x8:
2238 { /* CMP/EQ #imm, R0 */
2239 int32_t imm = SIGNEXT8(ir&0xFF);
2240 load_reg( R_EAX, 0 );
2241 CMP_imm8s_r32(imm, R_EAX);
2242 SETE_t();
2243 }
2244 break;
2245 case 0x9:
2246 { /* BT disp */
2247 int32_t disp = SIGNEXT8(ir&0xFF)<<1;
2248 if( sh4_x86.in_delay_slot ) {
2249 SLOTILLEGAL();
2250 } else {
2251 load_imm32( R_EDI, pc + 2 );
2252 CMP_imm8s_sh4r( 0, R_T );
2253 JE_rel8( 5, nottaken );
2254 load_imm32( R_EDI, disp + pc + 4 );
2255 JMP_TARGET(nottaken);
2256 INC_r32(R_ESI);
2257 return 1;
2258 }
2259 }
2260 break;
2261 case 0xB:
2262 { /* BF disp */
2263 int32_t disp = SIGNEXT8(ir&0xFF)<<1;
2264 if( sh4_x86.in_delay_slot ) {
2265 SLOTILLEGAL();
2266 } else {
2267 load_imm32( R_EDI, pc + 2 );
2268 CMP_imm8s_sh4r( 0, R_T );
2269 JNE_rel8( 5, nottaken );
2270 load_imm32( R_EDI, disp + pc + 4 );
2271 JMP_TARGET(nottaken);
2272 INC_r32(R_ESI);
2273 return 1;
2274 }
2275 }
2276 break;
2277 case 0xD:
2278 { /* BT/S disp */
2279 int32_t disp = SIGNEXT8(ir&0xFF)<<1;
2280 if( sh4_x86.in_delay_slot ) {
2281 SLOTILLEGAL();
2282 } else {
2283 load_imm32( R_EDI, pc + 4 );
2284 CMP_imm8s_sh4r( 0, R_T );
2285 JE_rel8( 5, nottaken );
2286 load_imm32( R_EDI, disp + pc + 4 );
2287 JMP_TARGET(nottaken);
2288 sh4_x86.in_delay_slot = TRUE;
2289 return 0;
2290 }
2291 }
2292 break;
2293 case 0xF:
2294 { /* BF/S disp */
2295 int32_t disp = SIGNEXT8(ir&0xFF)<<1;
2296 if( sh4_x86.in_delay_slot ) {
2297 SLOTILLEGAL();
2298 } else {
2299 load_imm32( R_EDI, pc + 4 );
2300 CMP_imm8s_sh4r( 0, R_T );
2301 JNE_rel8( 5, nottaken );
2302 load_imm32( R_EDI, disp + pc + 4 );
2303 JMP_TARGET(nottaken);
2304 sh4_x86.in_delay_slot = TRUE;
2305 return 0;
2306 }
2307 }
2308 break;
2309 default:
2310 UNDEF();
2311 break;
2312 }
2313 break;
2314 case 0x9:
2315 { /* MOV.W @(disp, PC), Rn */
2316 uint32_t Rn = ((ir>>8)&0xF); uint32_t disp = (ir&0xFF)<<1;
2317 if( sh4_x86.in_delay_slot ) {
2318 SLOTILLEGAL();
2319 } else {
2320 load_imm32( R_ECX, pc + disp + 4 );
2321 MEM_READ_WORD( R_ECX, R_EAX );
2322 store_reg( R_EAX, Rn );
2323 }
2324 }
2325 break;
2326 case 0xA:
2327 { /* BRA disp */
2328 int32_t disp = SIGNEXT12(ir&0xFFF)<<1;
2329 if( sh4_x86.in_delay_slot ) {
2330 SLOTILLEGAL();
2331 } else {
2332 load_imm32( R_EDI, disp + pc + 4 );
2333 sh4_x86.in_delay_slot = TRUE;
2334 return 0;
2335 }
2336 }
2337 break;
2338 case 0xB:
2339 { /* BSR disp */
2340 int32_t disp = SIGNEXT12(ir&0xFFF)<<1;
2341 if( sh4_x86.in_delay_slot ) {
2342 SLOTILLEGAL();
2343 } else {
2344 load_imm32( R_EAX, pc + 4 );
2345 store_spreg( R_EAX, R_PR );
2346 load_imm32( R_EDI, disp + pc + 4 );
2347 sh4_x86.in_delay_slot = TRUE;
2348 return 0;
2349 }
2350 }
2351 break;
2352 case 0xC:
2353 switch( (ir&0xF00) >> 8 ) {
2354 case 0x0:
2355 { /* MOV.B R0, @(disp, GBR) */
2356 uint32_t disp = (ir&0xFF);
2357 load_reg( R_EAX, 0 );
2358 load_spreg( R_ECX, R_GBR );
2359 ADD_imm32_r32( disp, R_ECX );
2360 MEM_WRITE_BYTE( R_ECX, R_EAX );
2361 }
2362 break;
2363 case 0x1:
2364 { /* MOV.W R0, @(disp, GBR) */
2365 uint32_t disp = (ir&0xFF)<<1;
2366 load_spreg( R_ECX, R_GBR );
2367 load_reg( R_EAX, 0 );
2368 ADD_imm32_r32( disp, R_ECX );
2369 check_walign16( R_ECX );
2370 MEM_WRITE_WORD( R_ECX, R_EAX );
2371 }
2372 break;
2373 case 0x2:
2374 { /* MOV.L R0, @(disp, GBR) */
2375 uint32_t disp = (ir&0xFF)<<2;
2376 load_spreg( R_ECX, R_GBR );
2377 load_reg( R_EAX, 0 );
2378 ADD_imm32_r32( disp, R_ECX );
2379 check_walign32( R_ECX );
2380 MEM_WRITE_LONG( R_ECX, R_EAX );
2381 }
2382 break;
2383 case 0x3:
2384 { /* TRAPA #imm */
2385 uint32_t imm = (ir&0xFF);
2386 if( sh4_x86.in_delay_slot ) {
2387 SLOTILLEGAL();
2388 } else {
2389 PUSH_imm32( imm );
2390 call_func0( sh4_raise_trap );
2391 ADD_imm8s_r32( 4, R_ESP );
2392 }
2393 }
2394 break;
2395 case 0x4:
2396 { /* MOV.B @(disp, GBR), R0 */
2397 uint32_t disp = (ir&0xFF);
2398 load_spreg( R_ECX, R_GBR );
2399 ADD_imm32_r32( disp, R_ECX );
2400 MEM_READ_BYTE( R_ECX, R_EAX );
2401 store_reg( R_EAX, 0 );
2402 }
2403 break;
2404 case 0x5:
2405 { /* MOV.W @(disp, GBR), R0 */
2406 uint32_t disp = (ir&0xFF)<<1;
2407 load_spreg( R_ECX, R_GBR );
2408 ADD_imm32_r32( disp, R_ECX );
2409 check_ralign16( R_ECX );
2410 MEM_READ_WORD( R_ECX, R_EAX );
2411 store_reg( R_EAX, 0 );
2412 }
2413 break;
2414 case 0x6:
2415 { /* MOV.L @(disp, GBR), R0 */
2416 uint32_t disp = (ir&0xFF)<<2;
2417 load_spreg( R_ECX, R_GBR );
2418 ADD_imm32_r32( disp, R_ECX );
2419 check_ralign32( R_ECX );
2420 MEM_READ_LONG( R_ECX, R_EAX );
2421 store_reg( R_EAX, 0 );
2422 }
2423 break;
2424 case 0x7:
2425 { /* MOVA @(disp, PC), R0 */
2426 uint32_t disp = (ir&0xFF)<<2;
2427 if( sh4_x86.in_delay_slot ) {
2428 SLOTILLEGAL();
2429 } else {
2430 load_imm32( R_ECX, (pc & 0xFFFFFFFC) + disp + 4 );
2431 store_reg( R_ECX, 0 );
2432 }
2433 }
2434 break;
2435 case 0x8:
2436 { /* TST #imm, R0 */
2437 uint32_t imm = (ir&0xFF);
2438 load_reg( R_EAX, 0 );
2439 TEST_imm32_r32( imm, R_EAX );
2440 SETE_t();
2441 }
2442 break;
2443 case 0x9:
2444 { /* AND #imm, R0 */
2445 uint32_t imm = (ir&0xFF);
2446 load_reg( R_EAX, 0 );
2447 AND_imm32_r32(imm, R_EAX);
2448 store_reg( R_EAX, 0 );
2449 }
2450 break;
2451 case 0xA:
2452 { /* XOR #imm, R0 */
2453 uint32_t imm = (ir&0xFF);
2454 load_reg( R_EAX, 0 );
2455 XOR_imm32_r32( imm, R_EAX );
2456 store_reg( R_EAX, 0 );
2457 }
2458 break;
2459 case 0xB:
2460 { /* OR #imm, R0 */
2461 uint32_t imm = (ir&0xFF);
2462 load_reg( R_EAX, 0 );
2463 OR_imm32_r32(imm, R_EAX);
2464 store_reg( R_EAX, 0 );
2465 }
2466 break;
2467 case 0xC:
2468 { /* TST.B #imm, @(R0, GBR) */
2469 uint32_t imm = (ir&0xFF);
2470 load_reg( R_EAX, 0);
2471 load_reg( R_ECX, R_GBR);
2472 ADD_r32_r32( R_EAX, R_ECX );
2473 MEM_READ_BYTE( R_ECX, R_EAX );
2474 TEST_imm8_r8( imm, R_AL );
2475 SETE_t();
2476 }
2477 break;
2478 case 0xD:
2479 { /* AND.B #imm, @(R0, GBR) */
2480 uint32_t imm = (ir&0xFF);
2481 load_reg( R_EAX, 0 );
2482 load_spreg( R_ECX, R_GBR );
2483 ADD_r32_r32( R_EAX, R_ECX );
2484 PUSH_r32(R_ECX);
2485 call_func0(sh4_read_byte);
2486 POP_r32(R_ECX);
2487 AND_imm32_r32(imm, R_EAX );
2488 MEM_WRITE_BYTE( R_ECX, R_EAX );
2489 }
2490 break;
2491 case 0xE:
2492 { /* XOR.B #imm, @(R0, GBR) */
2493 uint32_t imm = (ir&0xFF);
2494 load_reg( R_EAX, 0 );
2495 load_spreg( R_ECX, R_GBR );
2496 ADD_r32_r32( R_EAX, R_ECX );
2497 PUSH_r32(R_ECX);
2498 call_func0(sh4_read_byte);
2499 POP_r32(R_ECX);
2500 XOR_imm32_r32( imm, R_EAX );
2501 MEM_WRITE_BYTE( R_ECX, R_EAX );
2502 }
2503 break;
2504 case 0xF:
2505 { /* OR.B #imm, @(R0, GBR) */
2506 uint32_t imm = (ir&0xFF);
2507 load_reg( R_EAX, 0 );
2508 load_spreg( R_ECX, R_GBR );
2509 ADD_r32_r32( R_EAX, R_ECX );
2510 PUSH_r32(R_ECX);
2511 call_func0(sh4_read_byte);
2512 POP_r32(R_ECX);
2513 OR_imm32_r32(imm, R_EAX );
2514 MEM_WRITE_BYTE( R_ECX, R_EAX );
2515 }
2516 break;
2517 }
2518 break;
2519 case 0xD:
2520 { /* MOV.L @(disp, PC), Rn */
2521 uint32_t Rn = ((ir>>8)&0xF); uint32_t disp = (ir&0xFF)<<2;
2522 if( sh4_x86.in_delay_slot ) {
2523 SLOTILLEGAL();
2524 } else {
2525 uint32_t target = (pc & 0xFFFFFFFC) + disp + 4;
2526 char *ptr = mem_get_region(target);
2527 if( ptr != NULL ) {
2528 MOV_moff32_EAX( (uint32_t)ptr );
2529 } else {
2530 load_imm32( R_ECX, target );
2531 MEM_READ_LONG( R_ECX, R_EAX );
2532 }
2533 store_reg( R_EAX, Rn );
2534 }
2535 }
2536 break;
2537 case 0xE:
2538 { /* MOV #imm, Rn */
2539 uint32_t Rn = ((ir>>8)&0xF); int32_t imm = SIGNEXT8(ir&0xFF);
2540 load_imm32( R_EAX, imm );
2541 store_reg( R_EAX, Rn );
2542 }
2543 break;
2544 case 0xF:
2545 switch( ir&0xF ) {
2546 case 0x0:
2547 { /* FADD FRm, FRn */
2548 uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2549 check_fpuen();
2550 load_spreg( R_ECX, R_FPSCR );
2551 TEST_imm32_r32( FPSCR_PR, R_ECX );
2552 load_fr_bank( R_EDX );
2553 JNE_rel8(13,doubleprec);
2554 push_fr(R_EDX, FRm);
2555 push_fr(R_EDX, FRn);
2556 FADDP_st(1);
2557 pop_fr(R_EDX, FRn);
2558 JMP_rel8(11,end);
2559 JMP_TARGET(doubleprec);
2560 push_dr(R_EDX, FRm);
2561 push_dr(R_EDX, FRn);
2562 FADDP_st(1);
2563 pop_dr(R_EDX, FRn);
2564 JMP_TARGET(end);
2565 }
2566 break;
2567 case 0x1:
2568 { /* FSUB FRm, FRn */
2569 uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2570 check_fpuen();
2571 load_spreg( R_ECX, R_FPSCR );
2572 TEST_imm32_r32( FPSCR_PR, R_ECX );
2573 load_fr_bank( R_EDX );
2574 JNE_rel8(13, doubleprec);
2575 push_fr(R_EDX, FRn);
2576 push_fr(R_EDX, FRm);
2577 FSUBP_st(1);
2578 pop_fr(R_EDX, FRn);
2579 JMP_rel8(11, end);
2580 JMP_TARGET(doubleprec);
2581 push_dr(R_EDX, FRn);
2582 push_dr(R_EDX, FRm);
2583 FSUBP_st(1);
2584 pop_dr(R_EDX, FRn);
2585 JMP_TARGET(end);
2586 }
2587 break;
2588 case 0x2:
2589 { /* FMUL FRm, FRn */
2590 uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2591 check_fpuen();
2592 load_spreg( R_ECX, R_FPSCR );
2593 TEST_imm32_r32( FPSCR_PR, R_ECX );
2594 load_fr_bank( R_EDX );
2595 JNE_rel8(13, doubleprec);
2596 push_fr(R_EDX, FRm);
2597 push_fr(R_EDX, FRn);
2598 FMULP_st(1);
2599 pop_fr(R_EDX, FRn);
2600 JMP_rel8(11, end);
2601 JMP_TARGET(doubleprec);
2602 push_dr(R_EDX, FRm);
2603 push_dr(R_EDX, FRn);
2604 FMULP_st(1);
2605 pop_dr(R_EDX, FRn);
2606 JMP_TARGET(end);
2607 }
2608 break;
2609 case 0x3:
2610 { /* FDIV FRm, FRn */
2611 uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2612 check_fpuen();
2613 load_spreg( R_ECX, R_FPSCR );
2614 TEST_imm32_r32( FPSCR_PR, R_ECX );
2615 load_fr_bank( R_EDX );
2616 JNE_rel8(13, doubleprec);
2617 push_fr(R_EDX, FRn);
2618 push_fr(R_EDX, FRm);
2619 FDIVP_st(1);
2620 pop_fr(R_EDX, FRn);
2621 JMP_rel8(11, end);
2622 JMP_TARGET(doubleprec);
2623 push_dr(R_EDX, FRn);
2624 push_dr(R_EDX, FRm);
2625 FDIVP_st(1);
2626 pop_dr(R_EDX, FRn);
2627 JMP_TARGET(end);
2628 }
2629 break;
2630 case 0x4:
2631 { /* FCMP/EQ FRm, FRn */
2632 uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2633 check_fpuen();
2634 load_spreg( R_ECX, R_FPSCR );
2635 TEST_imm32_r32( FPSCR_PR, R_ECX );
2636 load_fr_bank( R_EDX );
2637 JNE_rel8(8, doubleprec);
2638 push_fr(R_EDX, FRm);
2639 push_fr(R_EDX, FRn);
2640 JMP_rel8(6, end);
2641 JMP_TARGET(doubleprec);
2642 push_dr(R_EDX, FRm);
2643 push_dr(R_EDX, FRn);
2644 JMP_TARGET(end);
2645 FCOMIP_st(1);
2646 SETE_t();
2647 FPOP_st();
2648 }
2649 break;
2650 case 0x5:
2651 { /* FCMP/GT FRm, FRn */
2652 uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2653 check_fpuen();
2654 load_spreg( R_ECX, R_FPSCR );
2655 TEST_imm32_r32( FPSCR_PR, R_ECX );
2656 load_fr_bank( R_EDX );
2657 JNE_rel8(8, doubleprec);
2658 push_fr(R_EDX, FRm);
2659 push_fr(R_EDX, FRn);
2660 JMP_rel8(6, end);
2661 JMP_TARGET(doubleprec);
2662 push_dr(R_EDX, FRm);
2663 push_dr(R_EDX, FRn);
2664 JMP_TARGET(end);
2665 FCOMIP_st(1);
2666 SETA_t();
2667 FPOP_st();
2668 }
2669 break;
2670 case 0x6:
2671 { /* FMOV @(R0, Rm), FRn */
2672 uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2673 check_fpuen();
2674 load_reg( R_EDX, Rm );
2675 ADD_sh4r_r32( REG_OFFSET(r[0]), R_EDX );
2676 check_ralign32( R_EDX );
2677 load_spreg( R_ECX, R_FPSCR );
2678 TEST_imm32_r32( FPSCR_SZ, R_ECX );
2679 JNE_rel8(19, doublesize);
2680 MEM_READ_LONG( R_EDX, R_EAX );
2681 load_fr_bank( R_ECX );
2682 store_fr( R_ECX, R_EAX, FRn );
2683 if( FRn&1 ) {
2684 JMP_rel8(48, end);
2685 JMP_TARGET(doublesize);
2686 MEM_READ_DOUBLE( R_EDX, R_EAX, R_EDX );
2687 load_spreg( R_ECX, R_FPSCR ); // assume read_long clobbered it
2688 load_xf_bank( R_ECX );
2689 store_fr( R_ECX, R_EAX, FRn&0x0E );
2690 store_fr( R_ECX, R_EDX, FRn|0x01 );
2691 JMP_TARGET(end);
2692 } else {
2693 JMP_rel8(36, end);
2694 JMP_TARGET(doublesize);
2695 MEM_READ_DOUBLE( R_EDX, R_EAX, R_EDX );
2696 load_fr_bank( R_ECX );
2697 store_fr( R_ECX, R_EAX, FRn&0x0E );
2698 store_fr( R_ECX, R_EDX, FRn|0x01 );
2699 JMP_TARGET(end);
2700 }
2701 }
2702 break;
2703 case 0x7:
2704 { /* FMOV FRm, @(R0, Rn) */
2705 uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2706 check_fpuen();
2707 load_reg( R_EDX, Rn );
2708 ADD_sh4r_r32( REG_OFFSET(r[0]), R_EDX );
2709 check_walign32( R_EDX );
2710 load_spreg( R_ECX, R_FPSCR );
2711 TEST_imm32_r32( FPSCR_SZ, R_ECX );
2712 JNE_rel8(20, doublesize);
2713 load_fr_bank( R_ECX );
2714 load_fr( R_ECX, R_EAX, FRm );
2715 MEM_WRITE_LONG( R_EDX, R_EAX ); // 12
2716 if( FRm&1 ) {
2717 JMP_rel8( 48, end );
2718 JMP_TARGET(doublesize);
2719 load_xf_bank( R_ECX );
2720 load_fr( R_ECX, R_EAX, FRm&0x0E );
2721 load_fr( R_ECX, R_ECX, FRm|0x01 );
2722 MEM_WRITE_DOUBLE( R_EDX, R_EAX, R_ECX );
2723 JMP_TARGET(end);
2724 } else {
2725 JMP_rel8( 39, end );
2726 JMP_TARGET(doublesize);
2727 load_fr_bank( R_ECX );
2728 load_fr( R_ECX, R_EAX, FRm&0x0E );
2729 load_fr( R_ECX, R_ECX, FRm|0x01 );
2730 MEM_WRITE_DOUBLE( R_EDX, R_EAX, R_ECX );
2731 JMP_TARGET(end);
2732 }
2733 }
2734 break;
2735 case 0x8:
2736 { /* FMOV @Rm, FRn */
2737 uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2738 check_fpuen();
2739 load_reg( R_EDX, Rm );
2740 check_ralign32( R_EDX );
2741 load_spreg( R_ECX, R_FPSCR );
2742 TEST_imm32_r32( FPSCR_SZ, R_ECX );
2743 JNE_rel8(19, doublesize);
2744 MEM_READ_LONG( R_EDX, R_EAX );
2745 load_fr_bank( R_ECX );
2746 store_fr( R_ECX, R_EAX, FRn );
2747 if( FRn&1 ) {
2748 JMP_rel8(48, end);
2749 JMP_TARGET(doublesize);
2750 MEM_READ_DOUBLE( R_EDX, R_EAX, R_EDX );
2751 load_spreg( R_ECX, R_FPSCR ); // assume read_long clobbered it
2752 load_xf_bank( R_ECX );
2753 store_fr( R_ECX, R_EAX, FRn&0x0E );
2754 store_fr( R_ECX, R_EDX, FRn|0x01 );
2755 JMP_TARGET(end);
2756 } else {
2757 JMP_rel8(36, end);
2758 JMP_TARGET(doublesize);
2759 MEM_READ_DOUBLE( R_EDX, R_EAX, R_EDX );
2760 load_fr_bank( R_ECX );
2761 store_fr( R_ECX, R_EAX, FRn&0x0E );
2762 store_fr( R_ECX, R_EDX, FRn|0x01 );
2763 JMP_TARGET(end);
2764 }
2765 }
2766 break;
2767 case 0x9:
2768 { /* FMOV @Rm+, FRn */
2769 uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
2770 check_fpuen();
2771 load_reg( R_EDX, Rm );
2772 check_ralign32( R_EDX );
2773 MOV_r32_r32( R_EDX, R_EAX );
2774 load_spreg( R_ECX, R_FPSCR );
2775 TEST_imm32_r32( FPSCR_SZ, R_ECX );
2776 JNE_rel8(25, doublesize);
2777 ADD_imm8s_r32( 4, R_EAX );
2778 store_reg( R_EAX, Rm );
2779 MEM_READ_LONG( R_EDX, R_EAX );
2780 load_fr_bank( R_ECX );
2781 store_fr( R_ECX, R_EAX, FRn );
2782 if( FRn&1 ) {
2783 JMP_rel8(54, end);
2784 JMP_TARGET(doublesize);
2785 ADD_imm8s_r32( 8, R_EAX );
2786 store_reg(R_EAX, Rm);
2787 MEM_READ_DOUBLE( R_EDX, R_EAX, R_EDX );
2788 load_spreg( R_ECX, R_FPSCR ); // assume read_long clobbered it
2789 load_xf_bank( R_ECX );
2790 store_fr( R_ECX, R_EAX, FRn&0x0E );
2791 store_fr( R_ECX, R_EDX, FRn|0x01 );
2792 JMP_TARGET(end);
2793 } else {
2794 JMP_rel8(42, end);
2795 ADD_imm8s_r32( 8, R_EAX );
2796 store_reg(R_EAX, Rm);
2797 MEM_READ_DOUBLE( R_EDX, R_EAX, R_EDX );
2798 load_fr_bank( R_ECX );
2799 store_fr( R_ECX, R_EAX, FRn&0x0E );
2800 store_fr( R_ECX, R_EDX, FRn|0x01 );
2801 JMP_TARGET(end);
2802 }
2803 }
2804 break;
2805 case 0xA:
2806 { /* FMOV FRm, @Rn */
2807 uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2808 check_fpuen();
2809 load_reg( R_EDX, Rn );
2810 check_walign32( R_EDX );
2811 load_spreg( R_ECX, R_FPSCR );
2812 TEST_imm32_r32( FPSCR_SZ, R_ECX );
2813 JNE_rel8(20, doublesize);
2814 load_fr_bank( R_ECX );
2815 load_fr( R_ECX, R_EAX, FRm );
2816 MEM_WRITE_LONG( R_EDX, R_EAX ); // 12
2817 if( FRm&1 ) {
2818 JMP_rel8( 48, end );
2819 JMP_TARGET(doublesize);
2820 load_xf_bank( R_ECX );
2821 load_fr( R_ECX, R_EAX, FRm&0x0E );
2822 load_fr( R_ECX, R_ECX, FRm|0x01 );
2823 MEM_WRITE_DOUBLE( R_EDX, R_EAX, R_ECX );
2824 JMP_TARGET(end);
2825 } else {
2826 JMP_rel8( 39, end );
2827 JMP_TARGET(doublesize);
2828 load_fr_bank( R_ECX );
2829 load_fr( R_ECX, R_EAX, FRm&0x0E );
2830 load_fr( R_ECX, R_ECX, FRm|0x01 );
2831 MEM_WRITE_DOUBLE( R_EDX, R_EAX, R_ECX );
2832 JMP_TARGET(end);
2833 }
2834 }
2835 break;
2836 case 0xB:
2837 { /* FMOV FRm, @-Rn */
2838 uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2839 check_fpuen();
2840 load_reg( R_EDX, Rn );
2841 check_walign32( R_EDX );
2842 load_spreg( R_ECX, R_FPSCR );
2843 TEST_imm32_r32( FPSCR_SZ, R_ECX );
2844 JNE_rel8(26, doublesize);
2845 load_fr_bank( R_ECX );
2846 load_fr( R_ECX, R_EAX, FRm );
2847 ADD_imm8s_r32(-4,R_EDX);
2848 store_reg( R_EDX, Rn );
2849 MEM_WRITE_LONG( R_EDX, R_EAX ); // 12
2850 if( FRm&1 ) {
2851 JMP_rel8( 54, end );
2852 JMP_TARGET(doublesize);
2853 load_xf_bank( R_ECX );
2854 load_fr( R_ECX, R_EAX, FRm&0x0E );
2855 load_fr( R_ECX, R_ECX, FRm|0x01 );
2856 ADD_imm8s_r32(-8,R_EDX);
2857 store_reg( R_EDX, Rn );
2858 MEM_WRITE_DOUBLE( R_EDX, R_EAX, R_ECX );
2859 JMP_TARGET(end);
2860 } else {
2861 JMP_rel8( 45, end );
2862 JMP_TARGET(doublesize);
2863 load_fr_bank( R_ECX );
2864 load_fr( R_ECX, R_EAX, FRm&0x0E );
2865 load_fr( R_ECX, R_ECX, FRm|0x01 );
2866 ADD_imm8s_r32(-8,R_EDX);
2867 store_reg( R_EDX, Rn );
2868 MEM_WRITE_DOUBLE( R_EDX, R_EAX, R_ECX );
2869 JMP_TARGET(end);
2870 }
2871 }
2872 break;
2873 case 0xC:
2874 { /* FMOV FRm, FRn */
2875 uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
2876 /* As horrible as this looks, it's actually covering 5 separate cases:
2877 * 1. 32-bit fr-to-fr (PR=0)
2878 * 2. 64-bit dr-to-dr (PR=1, FRm&1 == 0, FRn&1 == 0 )
2879 * 3. 64-bit dr-to-xd (PR=1, FRm&1 == 0, FRn&1 == 1 )
2880 * 4. 64-bit xd-to-dr (PR=1, FRm&1 == 1, FRn&1 == 0 )
2881 * 5. 64-bit xd-to-xd (PR=1, FRm&1 == 1, FRn&1 == 1 )
2882 */
2883 check_fpuen();
2884 load_spreg( R_ECX, R_FPSCR );
2885 load_fr_bank( R_EDX );
2886 TEST_imm32_r32( FPSCR_SZ, R_ECX );
2887 JNE_rel8(8, doublesize);
2888 load_fr( R_EDX, R_EAX, FRm ); // PR=0 branch
2889 store_fr( R_EDX, R_EAX, FRn );
2890 if( FRm&1 ) {
2891 JMP_rel8(24, end);
2892 JMP_TARGET(doublesize);
2893 load_xf_bank( R_ECX );
2894 load_fr( R_ECX, R_EAX, FRm-1 );
2895 if( FRn&1 ) {
2896 load_fr( R_ECX, R_EDX, FRm );
2897 store_fr( R_ECX, R_EAX, FRn-1 );
2898 store_fr( R_ECX, R_EDX, FRn );
2899 } else /* FRn&1 == 0 */ {
2900 load_fr( R_ECX, R_ECX, FRm );
2901 store_fr( R_EDX, R_EAX, FRn );
2902 store_fr( R_EDX, R_ECX, FRn+1 );
2903 }
2904 JMP_TARGET(end);
2905 } else /* FRm&1 == 0 */ {
2906 if( FRn&1 ) {
2907 JMP_rel8(24, end);
2908 load_xf_bank( R_ECX );
2909 load_fr( R_EDX, R_EAX, FRm );
2910 load_fr( R_EDX, R_EDX, FRm+1 );
2911 store_fr( R_ECX, R_EAX, FRn-1 );
2912 store_fr( R_ECX, R_EDX, FRn );
2913 JMP_TARGET(end);
2914 } else /* FRn&1 == 0 */ {
2915 JMP_rel8(12, end);
2916 load_fr( R_EDX, R_EAX, FRm );
2917 load_fr( R_EDX, R_ECX, FRm+1 );
2918 store_fr( R_EDX, R_EAX, FRn );
2919 store_fr( R_EDX, R_ECX, FRn+1 );
2920 JMP_TARGET(end);
2921 }
2922 }
2923 }
2924 break;
2925 case 0xD:
2926 switch( (ir&0xF0) >> 4 ) {
2927 case 0x0:
2928 { /* FSTS FPUL, FRn */
2929 uint32_t FRn = ((ir>>8)&0xF);
2930 check_fpuen();
2931 load_fr_bank( R_ECX );
2932 load_spreg( R_EAX, R_FPUL );
2933 store_fr( R_ECX, R_EAX, FRn );
2934 }
2935 break;
2936 case 0x1:
2937 { /* FLDS FRm, FPUL */
2938 uint32_t FRm = ((ir>>8)&0xF);
2939 check_fpuen();
2940 load_fr_bank( R_ECX );
2941 load_fr( R_ECX, R_EAX, FRm );
2942 store_spreg( R_EAX, R_FPUL );
2943 }
2944 break;
2945 case 0x2:
2946 { /* FLOAT FPUL, FRn */
2947 uint32_t FRn = ((ir>>8)&0xF);
2948 check_fpuen();
2949 load_spreg( R_ECX, R_FPSCR );
2950 load_spreg(R_EDX, REG_OFFSET(fr_bank));
2951 FILD_sh4r(R_FPUL);
2952 TEST_imm32_r32( FPSCR_PR, R_ECX );
2953 JNE_rel8(5, doubleprec);
2954 pop_fr( R_EDX, FRn );
2955 JMP_rel8(3, end);
2956 JMP_TARGET(doubleprec);
2957 pop_dr( R_EDX, FRn );
2958 JMP_TARGET(end);
2959 }
2960 break;
2961 case 0x3:
2962 { /* FTRC FRm, FPUL */
2963 uint32_t FRm = ((ir>>8)&0xF);
2964 check_fpuen();
2965 load_spreg( R_ECX, R_FPSCR );
2966 load_fr_bank( R_EDX );
2967 TEST_imm32_r32( FPSCR_PR, R_ECX );
2968 JNE_rel8(5, doubleprec);
2969 push_fr( R_EDX, FRm );
2970 JMP_rel8(3, doop);
2971 JMP_TARGET(doubleprec);
2972 push_dr( R_EDX, FRm );
2973 JMP_TARGET( doop );
2974 load_imm32( R_ECX, (uint32_t)&max_int );
2975 FILD_r32ind( R_ECX );
2976 FCOMIP_st(1);
2977 JNA_rel8( 32, sat );
2978 load_imm32( R_ECX, (uint32_t)&min_int ); // 5
2979 FILD_r32ind( R_ECX ); // 2
2980 FCOMIP_st(1); // 2
2981 JAE_rel8( 21, sat2 ); // 2
2982 load_imm32( R_EAX, (uint32_t)&save_fcw );
2983 FNSTCW_r32ind( R_EAX );
2984 load_imm32( R_EDX, (uint32_t)&trunc_fcw );
2985 FLDCW_r32ind( R_EDX );
2986 FISTP_sh4r(R_FPUL); // 3
2987 FLDCW_r32ind( R_EAX );
2988 JMP_rel8( 9, end ); // 2
2990 JMP_TARGET(sat);
2991 JMP_TARGET(sat2);
2992 MOV_r32ind_r32( R_ECX, R_ECX ); // 2
2993 store_spreg( R_ECX, R_FPUL );
2994 FPOP_st();
2995 JMP_TARGET(end);
2996 }
2997 break;
2998 case 0x4:
2999 { /* FNEG FRn */
3000 uint32_t FRn = ((ir>>8)&0xF);
3001 check_fpuen();
3002 load_spreg( R_ECX, R_FPSCR );
3003 TEST_imm32_r32( FPSCR_PR, R_ECX );
3004 load_fr_bank( R_EDX );
3005 JNE_rel8(10, doubleprec);
3006 push_fr(R_EDX, FRn);
3007 FCHS_st0();
3008 pop_fr(R_EDX, FRn);
3009 JMP_rel8(8, end);
3010 JMP_TARGET(doubleprec);
3011 push_dr(R_EDX, FRn);
3012 FCHS_st0();
3013 pop_dr(R_EDX, FRn);
3014 JMP_TARGET(end);
3015 }
3016 break;
3017 case 0x5:
3018 { /* FABS FRn */
3019 uint32_t FRn = ((ir>>8)&0xF);
3020 check_fpuen();
3021 load_spreg( R_ECX, R_FPSCR );
3022 load_fr_bank( R_EDX );
3023 TEST_imm32_r32( FPSCR_PR, R_ECX );
3024 JNE_rel8(10, doubleprec);
3025 push_fr(R_EDX, FRn); // 3
3026 FABS_st0(); // 2
3027 pop_fr( R_EDX, FRn); //3
3028 JMP_rel8(8,end); // 2
3029 JMP_TARGET(doubleprec);
3030 push_dr(R_EDX, FRn);
3031 FABS_st0();
3032 pop_dr(R_EDX, FRn);
3033 JMP_TARGET(end);
3034 }
3035 break;
3036 case 0x6:
3037 { /* FSQRT FRn */
3038 uint32_t FRn = ((ir>>8)&0xF);
3039 check_fpuen();
3040 load_spreg( R_ECX, R_FPSCR );
3041 TEST_imm32_r32( FPSCR_PR, R_ECX );
3042 load_fr_bank( R_EDX );
3043 JNE_rel8(10, doubleprec);
3044 push_fr(R_EDX, FRn);
3045 FSQRT_st0();
3046 pop_fr(R_EDX, FRn);
3047 JMP_rel8(8, end);
3048 JMP_TARGET(doubleprec);
3049 push_dr(R_EDX, FRn);
3050 FSQRT_st0();
3051 pop_dr(R_EDX, FRn);
3052 JMP_TARGET(end);
3053 }
3054 break;
3055 case 0x7:
3056 { /* FSRRA FRn */
3057 uint32_t FRn = ((ir>>8)&0xF);
3058 check_fpuen();
3059 load_spreg( R_ECX, R_FPSCR );
3060 TEST_imm32_r32( FPSCR_PR, R_ECX );
3061 load_fr_bank( R_EDX );
3062 JNE_rel8(12, end); // PR=0 only
3063 FLD1_st0();
3064 push_fr(R_EDX, FRn);
3065 FSQRT_st0();
3066 FDIVP_st(1);
3067 pop_fr(R_EDX, FRn);
3068 JMP_TARGET(end);
3069 }
3070 break;
3071 case 0x8:
3072 { /* FLDI0 FRn */
3073 uint32_t FRn = ((ir>>8)&0xF);
3074 /* IFF PR=0 */
3075 check_fpuen();
3076 load_spreg( R_ECX, R_FPSCR );
3077 TEST_imm32_r32( FPSCR_PR, R_ECX );
3078 JNE_rel8(8, end);
3079 XOR_r32_r32( R_EAX, R_EAX );
3080 load_spreg( R_ECX, REG_OFFSET(fr_bank) );
3081 store_fr( R_ECX, R_EAX, FRn );
3082 JMP_TARGET(end);
3083 }
3084 break;
3085 case 0x9:
3086 { /* FLDI1 FRn */
3087 uint32_t FRn = ((ir>>8)&0xF);
3088 /* IFF PR=0 */
3089 check_fpuen();
3090 load_spreg( R_ECX, R_FPSCR );
3091 TEST_imm32_r32( FPSCR_PR, R_ECX );
3092 JNE_rel8(11, end);
3093 load_imm32(R_EAX, 0x3F800000);
3094 load_spreg( R_ECX, REG_OFFSET(fr_bank) );
3095 store_fr( R_ECX, R_EAX, FRn );
3096 JMP_TARGET(end);
3097 }
3098 break;
3099 case 0xA:
3100 { /* FCNVSD FPUL, FRn */
3101 uint32_t FRn = ((ir>>8)&0xF);
3102 check_fpuen();
3103 load_spreg( R_ECX, R_FPSCR );
3104 TEST_imm32_r32( FPSCR_PR, R_ECX );
3105 JE_rel8(9, end); // only when PR=1
3106 load_fr_bank( R_ECX );
3107 push_fpul();
3108 pop_dr( R_ECX, FRn );
3109 JMP_TARGET(end);
3110 }
3111 break;
3112 case 0xB:
3113 { /* FCNVDS FRm, FPUL */
3114 uint32_t FRm = ((ir>>8)&0xF);
3115 check_fpuen();
3116 load_spreg( R_ECX, R_FPSCR );
3117 TEST_imm32_r32( FPSCR_PR, R_ECX );
3118 JE_rel8(9, end); // only when PR=1
3119 load_fr_bank( R_ECX );
3120 push_dr( R_ECX, FRm );
3121 pop_fpul();
3122 JMP_TARGET(end);
3123 }
3124 break;
3125 case 0xE:
3126 { /* FIPR FVm, FVn */
3127 uint32_t FVn = ((ir>>10)&0x3); uint32_t FVm = ((ir>>8)&0x3);
3128 check_fpuen();
3129 load_spreg( R_ECX, R_FPSCR );
3130 TEST_imm32_r32( FPSCR_PR, R_ECX );
3131 JNE_rel8(44, doubleprec);
3133 load_fr_bank( R_ECX );
3134 push_fr( R_ECX, FVm<<2 );
3135 push_fr( R_ECX, FVn<<2 );
3136 FMULP_st(1);
3137 push_fr( R_ECX, (FVm<<2)+1);
3138 push_fr( R_ECX, (FVn<<2)+1);
3139 FMULP_st(1);
3140 FADDP_st(1);
3141 push_fr( R_ECX, (FVm<<2)+2);
3142 push_fr( R_ECX, (FVn<<2)+2);
3143 FMULP_st(1);
3144 FADDP_st(1);
3145 push_fr( R_ECX, (FVm<<2)+3);
3146 push_fr( R_ECX, (FVn<<2)+3);
3147 FMULP_st(1);
3148 FADDP_st(1);
3149 pop_fr( R_ECX, (FVn<<2)+3);
3150 JMP_TARGET(doubleprec);
3151 }
3152 break;
3153 case 0xF:
3154 switch( (ir&0x100) >> 8 ) {
3155 case 0x0:
3156 { /* FSCA FPUL, FRn */
3157 uint32_t FRn = ((ir>>9)&0x7)<<1;
3158 check_fpuen();
3159 load_spreg( R_ECX, R_FPSCR );
3160 TEST_imm32_r32( FPSCR_PR, R_ECX );
3161 JNE_rel8( 21, doubleprec );
3162 load_fr_bank( R_ECX );
3163 ADD_imm8s_r32( (FRn&0x0E)<<2, R_ECX );
3164 load_spreg( R_EDX, R_FPUL );
3165 call_func2( sh4_fsca, R_EDX, R_ECX );
3166 JMP_TARGET(doubleprec);
3167 }
3168 break;
3169 case 0x1:
3170 switch( (ir&0x200) >> 9 ) {
3171 case 0x0:
3172 { /* FTRV XMTRX, FVn */
3173 uint32_t FVn = ((ir>>10)&0x3);
3174 check_fpuen();
3175 load_spreg( R_ECX, R_FPSCR );
3176 TEST_imm32_r32( FPSCR_PR, R_ECX );
3177 JNE_rel8( 30, doubleprec );
3178 load_fr_bank( R_EDX ); // 3
3179 ADD_imm8s_r32( FVn<<4, R_EDX ); // 3
3180 load_xf_bank( R_ECX ); // 12
3181 call_func2( sh4_ftrv, R_EDX, R_ECX ); // 12
3182 JMP_TARGET(doubleprec);
3183 }
3184 break;
3185 case 0x1:
3186 switch( (ir&0xC00) >> 10 ) {
3187 case 0x0:
3188 { /* FSCHG */
3189 check_fpuen();
3190 load_spreg( R_ECX, R_FPSCR );
3191 XOR_imm32_r32( FPSCR_SZ, R_ECX );
3192 store_spreg( R_ECX, R_FPSCR );
3193 }
3194 break;
3195 case 0x2:
3196 { /* FRCHG */
3197 check_fpuen();
3198 load_spreg( R_ECX, R_FPSCR );
3199 XOR_imm32_r32( FPSCR_FR, R_ECX );
3200 store_spreg( R_ECX, R_FPSCR );
3201 update_fr_bank( R_ECX );
3202 }
3203 break;
3204 case 0x3:
3205 { /* UNDEF */
3206 if( sh4_x86.in_delay_slot ) {
3207 SLOTILLEGAL();
3208 } else {
3209 JMP_exit(EXIT_ILLEGAL);
3210 return 1;
3211 }
3212 }
3213 break;
3214 default:
3215 UNDEF();
3216 break;
3217 }
3218 break;
3219 }
3220 break;
3221 }
3222 break;
3223 default:
3224 UNDEF();
3225 break;
3226 }
3227 break;
3228 case 0xE:
3229 { /* FMAC FR0, FRm, FRn */
3230 uint32_t FRn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
3231 check_fpuen();
3232 load_spreg( R_ECX, R_FPSCR );
3233 load_spreg( R_EDX, REG_OFFSET(fr_bank));
3234 TEST_imm32_r32( FPSCR_PR, R_ECX );
3235 JNE_rel8(18, doubleprec);
3236 push_fr( R_EDX, 0 );
3237 push_fr( R_EDX, FRm );
3238 FMULP_st(1);
3239 push_fr( R_EDX, FRn );
3240 FADDP_st(1);
3241 pop_fr( R_EDX, FRn );
3242 JMP_rel8(16, end);
3243 JMP_TARGET(doubleprec);
3244 push_dr( R_EDX, 0 );
3245 push_dr( R_EDX, FRm );
3246 FMULP_st(1);
3247 push_dr( R_EDX, FRn );
3248 FADDP_st(1);
3249 pop_dr( R_EDX, FRn );
3250 JMP_TARGET(end);
3251 }
3252 break;
3253 default:
3254 UNDEF();
3255 break;
3256 }
3257 break;
3258 }
3260 if( sh4_x86.in_delay_slot ) {
3261 ADD_imm8s_r32(2,R_ESI);
3262 sh4_x86.in_delay_slot = FALSE;
3263 return 1;
3264 } else {
3265 INC_r32(R_ESI);
3266 }
3267 return 0;
3268 }
.