2 * $Id: armcore.c,v 1.13 2005-12-28 22:50:08 nkeynes Exp $
4 * ARM7TDMI CPU emulation core.
6 * Copyright (c) 2005 Nathan Keynes.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
19 #define MODULE aica_module
21 #include "aica/armcore.h"
24 #define STM_R15_OFFSET 12
26 struct arm_registers armr;
28 void arm_set_mode( int mode );
30 uint32_t arm_exceptions[][2] = {{ MODE_SVC, 0x00000000 },
31 { MODE_UND, 0x00000004 },
32 { MODE_SVC, 0x00000008 },
33 { MODE_ABT, 0x0000000C },
34 { MODE_ABT, 0x00000010 },
35 { MODE_IRQ, 0x00000018 },
36 { MODE_FIQ, 0x0000001C } };
39 #define EXC_UNDEFINED 1
40 #define EXC_SOFTWARE 2
41 #define EXC_PREFETCH_ABORT 3
42 #define EXC_DATA_ABORT 4
44 #define EXC_FAST_IRQ 6
46 uint32_t arm_cpu_freq = ARM_BASE_RATE;
47 uint32_t arm_cpu_period = 1000 / ARM_BASE_RATE;
50 static struct breakpoint_struct arm_breakpoints[MAX_BREAKPOINTS];
51 static int arm_breakpoint_count = 0;
53 void arm_set_breakpoint( uint32_t pc, int type )
55 arm_breakpoints[arm_breakpoint_count].address = pc;
56 arm_breakpoints[arm_breakpoint_count].type = type;
57 arm_breakpoint_count++;
60 gboolean arm_clear_breakpoint( uint32_t pc, int type )
64 for( i=0; i<arm_breakpoint_count; i++ ) {
65 if( arm_breakpoints[i].address == pc &&
66 arm_breakpoints[i].type == type ) {
67 while( ++i < arm_breakpoint_count ) {
68 arm_breakpoints[i-1].address = arm_breakpoints[i].address;
69 arm_breakpoints[i-1].type = arm_breakpoints[i].type;
71 arm_breakpoint_count--;
78 int arm_get_breakpoint( uint32_t pc )
81 for( i=0; i<arm_breakpoint_count; i++ ) {
82 if( arm_breakpoints[i].address == pc )
83 return arm_breakpoints[i].type;
88 uint32_t arm_run_slice( uint32_t nanosecs )
91 uint32_t target = armr.icount + nanosecs / arm_cpu_period;
92 uint32_t start = armr.icount;
93 while( armr.icount < target ) {
95 if( !arm_execute_instruction() )
97 #ifdef ENABLE_DEBUG_MODE
98 for( i=0; i<arm_breakpoint_count; i++ ) {
99 if( arm_breakpoints[i].address == armr.r[15] ) {
103 if( i != arm_breakpoint_count ) {
105 if( arm_breakpoints[i].type == BREAK_ONESHOT )
106 arm_clear_breakpoint( armr.r[15], BREAK_ONESHOT );
112 if( target != armr.icount ) {
113 /* Halted - compute time actually executed */
114 nanosecs = (armr.icount - start) * arm_cpu_period;
119 void arm_save_state( FILE *f )
121 fwrite( &armr, sizeof(armr), 1, f );
124 int arm_load_state( FILE *f )
126 fread( &armr, sizeof(armr), 1, f );
131 void arm_reset( void )
133 /* Wipe all processor state */
134 memset( &armr, 0, sizeof(armr) );
136 armr.cpsr = MODE_SVC | CPSR_I | CPSR_F;
137 armr.r[15] = 0x00000000;
/* Field-selection masks for arm_set_cpsr()/arm_set_spsr(), mirroring the
 * MSR instruction's field bits (c/x/s/f). */
#define SET_CPSR_CONTROL   0x00010000  /* mode, T bit, interrupt masks */
#define SET_CPSR_EXTENSION 0x00020000  /* bits 8..15 (currently undefined) */
#define SET_CPSR_STATUS    0x00040000  /* bits 16..23 (currently undefined) */
#define SET_CPSR_FLAGS     0x00080000  /* N/Z/C/V condition flags */
145 uint32_t arm_get_cpsr( void )
147 /* write back all flags to the cpsr */
148 armr.cpsr = armr.cpsr & CPSR_COMPACT_MASK;
149 if( armr.n ) armr.cpsr |= CPSR_N;
150 if( armr.z ) armr.cpsr |= CPSR_Z;
151 if( armr.c ) armr.cpsr |= CPSR_C;
152 if( armr.v ) armr.cpsr |= CPSR_V;
153 if( armr.t ) armr.cpsr |= CPSR_T;
158 * Set the CPSR to the specified value.
160 * @param value values to set in CPSR
161 * @param fields set of mask values to define which sections of the
162 * CPSR to set (one of the SET_CPSR_* values above)
164 void arm_set_cpsr( uint32_t value, uint32_t fields )
166 if( IS_PRIVILEGED_MODE() ) {
167 if( fields & SET_CPSR_CONTROL ) {
168 int mode = value & CPSR_MODE;
169 arm_set_mode( mode );
170 armr.t = ( value & CPSR_T ); /* Technically illegal to change */
171 armr.cpsr = (armr.cpsr & 0xFFFFFF00) | (value & 0x000000FF);
174 /* Middle 16 bits not currently defined */
176 if( fields & SET_CPSR_FLAGS ) {
177 /* Break flags directly out of given value - don't bother writing
180 armr.n = ( value & CPSR_N );
181 armr.z = ( value & CPSR_Z );
182 armr.c = ( value & CPSR_C );
183 armr.v = ( value & CPSR_V );
187 void arm_set_spsr( uint32_t value, uint32_t fields )
189 /* Only defined if we actually have an SPSR register */
190 if( IS_EXCEPTION_MODE() ) {
191 if( fields & SET_CPSR_CONTROL ) {
192 armr.spsr = (armr.spsr & 0xFFFFFF00) | (value & 0x000000FF);
195 /* Middle 16 bits not currently defined */
197 if( fields & SET_CPSR_FLAGS ) {
198 armr.spsr = (armr.spsr & 0x00FFFFFF) | (value & 0xFF000000);
204 * Raise an ARM exception (other than reset, which uses arm_reset().
205 * @param exception one of the EXC_* exception codes defined above.
207 void arm_raise_exception( int exception )
209 int mode = arm_exceptions[exception][0];
210 uint32_t spsr = arm_get_cpsr();
211 arm_set_mode( mode );
213 armr.r[14] = armr.r[15];
214 armr.cpsr = (spsr & (~CPSR_T)) | CPSR_I;
215 if( mode == MODE_FIQ )
217 armr.r[15] = arm_exceptions[exception][1];
220 void arm_restore_cpsr( void )
222 int spsr = armr.spsr;
223 int mode = spsr & CPSR_MODE;
224 arm_set_mode( mode );
226 armr.n = ( spsr & CPSR_N );
227 armr.z = ( spsr & CPSR_Z );
228 armr.c = ( spsr & CPSR_C );
229 armr.v = ( spsr & CPSR_V );
230 armr.t = ( spsr & CPSR_T );
236 * Change the current executing ARM mode to the requested mode.
237 * Saves any required registers to banks and restores those for the
238 * correct mode. (Note does not actually update CPSR at the moment).
240 void arm_set_mode( int targetMode )
242 int currentMode = armr.cpsr & CPSR_MODE;
243 if( currentMode == targetMode )
246 switch( currentMode ) {
249 armr.user_r[5] = armr.r[13];
250 armr.user_r[6] = armr.r[14];
253 armr.svc_r[0] = armr.r[13];
254 armr.svc_r[1] = armr.r[14];
255 armr.svc_r[2] = armr.spsr;
258 armr.abt_r[0] = armr.r[13];
259 armr.abt_r[1] = armr.r[14];
260 armr.abt_r[2] = armr.spsr;
263 armr.und_r[0] = armr.r[13];
264 armr.und_r[1] = armr.r[14];
265 armr.und_r[2] = armr.spsr;
268 armr.irq_r[0] = armr.r[13];
269 armr.irq_r[1] = armr.r[14];
270 armr.irq_r[2] = armr.spsr;
273 armr.fiq_r[0] = armr.r[8];
274 armr.fiq_r[1] = armr.r[9];
275 armr.fiq_r[2] = armr.r[10];
276 armr.fiq_r[3] = armr.r[11];
277 armr.fiq_r[4] = armr.r[12];
278 armr.fiq_r[5] = armr.r[13];
279 armr.fiq_r[6] = armr.r[14];
280 armr.fiq_r[7] = armr.spsr;
281 armr.r[8] = armr.user_r[0];
282 armr.r[9] = armr.user_r[1];
283 armr.r[10] = armr.user_r[2];
284 armr.r[11] = armr.user_r[3];
285 armr.r[12] = armr.user_r[4];
289 switch( targetMode ) {
292 armr.r[13] = armr.user_r[5];
293 armr.r[14] = armr.user_r[6];
296 armr.r[13] = armr.svc_r[0];
297 armr.r[14] = armr.svc_r[1];
298 armr.spsr = armr.svc_r[2];
301 armr.r[13] = armr.abt_r[0];
302 armr.r[14] = armr.abt_r[1];
303 armr.spsr = armr.abt_r[2];
306 armr.r[13] = armr.und_r[0];
307 armr.r[14] = armr.und_r[1];
308 armr.spsr = armr.und_r[2];
311 armr.r[13] = armr.irq_r[0];
312 armr.r[14] = armr.irq_r[1];
313 armr.spsr = armr.irq_r[2];
316 armr.user_r[0] = armr.r[8];
317 armr.user_r[1] = armr.r[9];
318 armr.user_r[2] = armr.r[10];
319 armr.user_r[3] = armr.r[11];
320 armr.user_r[4] = armr.r[12];
321 armr.r[8] = armr.fiq_r[0];
322 armr.r[9] = armr.fiq_r[1];
323 armr.r[10] = armr.fiq_r[2];
324 armr.r[11] = armr.fiq_r[3];
325 armr.r[12] = armr.fiq_r[4];
326 armr.r[13] = armr.fiq_r[5];
327 armr.r[14] = armr.fiq_r[6];
328 armr.spsr = armr.fiq_r[7];
/* Page references are as per ARM DDI 0100E (June 2000) */

#define MEM_READ_BYTE( addr ) arm_read_byte(addr)
#define MEM_READ_WORD( addr ) arm_read_word(addr)
#define MEM_READ_LONG( addr ) arm_read_long(addr)
#define MEM_WRITE_BYTE( addr, val ) arm_write_byte(addr, val)
#define MEM_WRITE_WORD( addr, val ) arm_write_word(addr, val)
#define MEM_WRITE_LONG( addr, val ) arm_write_long(addr, val)

/* Flag computation for 32-bit add/subtract:
 *  NOT-borrow (ARM C flag after SUB): set when op1 >= op2 (unsigned).
 *  Carry (C flag after ADD): set when the unsigned result wrapped.
 *  Overflow (V flag): sign-based tests per the ARM ARM. */
#define IS_NOTBORROW( result, op1, op2 ) (op2 > op1 ? 0 : 1)
#define IS_CARRY( result, op1, op2 ) (result < op1 ? 1 : 0)
#define IS_SUBOVERFLOW( result, op1, op2 ) (((op1^op2) & (result^op1)) >> 31)
/* Fixed: addition overflows when both operands have the SAME sign and the
 * result's sign differs. The previous (op1&op2) form only caught
 * negative+negative overflow and missed positive+positive overflow. */
#define IS_ADDOVERFLOW( result, op1, op2 ) ((~((op1)^(op2)) & ((result)^(op1))) >> 31)

#define PC armr.r[15]

/* Instruction fields */
#define COND(ir) (ir>>28)           /* condition code */
#define GRP(ir) ((ir>>26)&0x03)     /* top-level instruction group */
#define OPCODE(ir) ((ir>>20)&0x1F)
#define IFLAG(ir) (ir&0x02000000)   /* immediate operand */
#define SFLAG(ir) (ir&0x00100000)   /* set condition flags */
#define PFLAG(ir) (ir&0x01000000)   /* pre-indexed */
#define UFLAG(ir) (ir&0x00800000)   /* up (add offset) */
#define BFLAG(ir) (ir&0x00400000)   /* byte access */
#define WFLAG(ir) (ir&0x00200000)   /* write-back */
#define LFLAG(ir) SFLAG(ir)         /* load bit (same bit position) */
/* Register-read forms: r15 reads with a +4 adjustment to model the ARM's
 * PC-ahead behavior (assumes r[15] holds next-instruction address — confirm) */
#define RN(ir) (armr.r[((ir>>16)&0x0F)] + (((ir>>16)&0x0F) == 0x0F ? 4 : 0))
#define RD(ir) (armr.r[((ir>>12)&0x0F)] + (((ir>>12)&0x0F) == 0x0F ? 4 : 0))
#define RDn(ir) ((ir>>12)&0x0F)
#define RS(ir) (armr.r[((ir>>8)&0x0F)] + (((ir>>8)&0x0F) == 0x0F ? 4 : 0))
#define RM(ir) (armr.r[(ir&0x0F)] + (((ir&0x0F) == 0x0F ? 4 : 0)) )
/* L-value (writeable) register forms — no PC adjustment */
#define LRN(ir) armr.r[((ir>>16)&0x0F)]
#define LRD(ir) armr.r[((ir>>12)&0x0F)]
#define LRS(ir) armr.r[((ir>>8)&0x0F)]
#define LRM(ir) armr.r[(ir&0x0F)]

#define IMM8(ir) (ir&0xFF)
#define IMM12(ir) (ir&0xFFF)
#define SHIFTIMM(ir) ((ir>>7)&0x1F)
#define IMMROT(ir) ((ir>>7)&0x1E)
#define ROTIMM12(ir) ROTATE_RIGHT_LONG(IMM8(ir),IMMROT(ir))
#define SIGNEXT24(n) ((n&0x00800000) ? (n|0xFF000000) : (n&0x00FFFFFF))
#define SHIFT(ir) ((ir>>4)&0x07)
#define DISP24(ir) ((ir&0x00FFFFFF))
#define UNDEF(ir) do{ arm_raise_exception( EXC_UNDEFINED ); return TRUE; } while(0)
#define UNIMP(ir) do{ PC-=4; ERROR( "Halted on unimplemented instruction at %08x, opcode = %04x", PC, ir ); dreamcast_stop(); return FALSE; }while(0)
383 * Determine the value of the shift-operand for a data processing instruction,
384 * without determing a value for shift_C (optimized form for instructions that
385 * don't require shift_C ).
386 * @see s5.1 Addressing Mode 1 - Data-processing operands (p A5-2, 218)
388 static uint32_t arm_get_shift_operand( uint32_t ir )
390 uint32_t operand, tmp;
391 if( IFLAG(ir) == 0 ) {
394 case 0: /* (Rm << imm) */
395 operand = operand << SHIFTIMM(ir);
397 case 1: /* (Rm << Rs) */
399 if( tmp > 31 ) operand = 0;
400 else operand = operand << tmp;
402 case 2: /* (Rm >> imm) */
403 operand = operand >> SHIFTIMM(ir);
405 case 3: /* (Rm >> Rs) */
407 if( tmp > 31 ) operand = 0;
408 else operand = operand >> ir;
410 case 4: /* (Rm >>> imm) */
412 if( tmp == 0 ) operand = ((int32_t)operand) >> 31;
413 else operand = ((int32_t)operand) >> tmp;
415 case 5: /* (Rm >>> Rs) */
417 if( tmp > 31 ) operand = ((int32_t)operand) >> 31;
418 else operand = ((int32_t)operand) >> tmp;
422 if( tmp == 0 ) /* RRX aka rotate with carry */
423 operand = (operand >> 1) | (armr.c<<31);
425 operand = ROTATE_RIGHT_LONG(operand,tmp);
429 operand = ROTATE_RIGHT_LONG(operand,tmp);
435 operand = ROTATE_RIGHT_LONG(operand, tmp);
441 * Determine the value of the shift-operand for a data processing instruction,
442 * and set armr.shift_c accordingly.
443 * @see s5.1 Addressing Mode 1 - Data-processing operands (p A5-2, 218)
445 static uint32_t arm_get_shift_operand_s( uint32_t ir )
447 uint32_t operand, tmp;
448 if( IFLAG(ir) == 0 ) {
451 case 0: /* (Rm << imm) */
453 if( tmp == 0 ) { /* Rm */
454 armr.shift_c = armr.c;
455 } else { /* Rm << imm */
456 armr.shift_c = (operand >> (32-tmp)) & 0x01;
457 operand = operand << tmp;
460 case 1: /* (Rm << Rs) */
463 armr.shift_c = armr.c;
466 armr.shift_c = (operand >> (32-tmp)) & 0x01;
467 else armr.shift_c = 0;
469 operand = operand << tmp;
473 case 2: /* (Rm >> imm) */
476 armr.shift_c = operand >> 31;
479 armr.shift_c = (operand >> (tmp-1)) & 0x01;
480 operand = RM(ir) >> tmp;
483 case 3: /* (Rm >> Rs) */
486 armr.shift_c = armr.c;
489 armr.shift_c = (operand >> (tmp-1))&0x01;
490 else armr.shift_c = 0;
492 operand = operand >> tmp;
496 case 4: /* (Rm >>> imm) */
499 armr.shift_c = operand >> 31;
500 operand = -armr.shift_c;
502 armr.shift_c = (operand >> (tmp-1)) & 0x01;
503 operand = ((int32_t)operand) >> tmp;
506 case 5: /* (Rm >>> Rs) */
509 armr.shift_c = armr.c;
512 armr.shift_c = (operand >> (tmp-1))&0x01;
513 operand = ((int32_t)operand) >> tmp;
515 armr.shift_c = operand >> 31;
516 operand = ((int32_t)operand) >> 31;
522 if( tmp == 0 ) { /* RRX aka rotate with carry */
523 armr.shift_c = operand&0x01;
524 operand = (operand >> 1) | (armr.c<<31);
526 armr.shift_c = operand>>(tmp-1);
527 operand = ROTATE_RIGHT_LONG(operand,tmp);
533 armr.shift_c = armr.c;
537 armr.shift_c = operand>>31;
539 armr.shift_c = (operand>>(tmp-1))&0x1;
540 operand = ROTATE_RIGHT_LONG(operand,tmp);
549 armr.shift_c = armr.c;
551 operand = ROTATE_RIGHT_LONG(operand, tmp);
552 armr.shift_c = operand>>31;
559 * Another variant of the shifter code for index-based memory addressing.
560 * Distinguished by the fact that it doesn't support register shifts, and
561 * ignores the I flag (WTF do the load/store instructions use the I flag to
562 * mean the _exact opposite_ of what it means for the data processing
565 static uint32_t arm_get_address_index( uint32_t ir )
567 uint32_t operand = RM(ir);
571 case 0: /* (Rm << imm) */
572 operand = operand << SHIFTIMM(ir);
574 case 2: /* (Rm >> imm) */
575 operand = operand >> SHIFTIMM(ir);
577 case 4: /* (Rm >>> imm) */
579 if( tmp == 0 ) operand = ((int32_t)operand) >> 31;
580 else operand = ((int32_t)operand) >> tmp;
584 if( tmp == 0 ) /* RRX aka rotate with carry */
585 operand = (operand >> 1) | (armr.c<<31);
587 operand = ROTATE_RIGHT_LONG(operand,tmp);
595 * Determine the address operand of a load/store instruction, including
596 * applying any pre/post adjustments to the address registers.
597 * @see s5.2 Addressing Mode 2 - Load and Store Word or Unsigned Byte
598 * @param The instruction word.
599 * @return The calculated address
601 static uint32_t arm_get_address_operand( uint32_t ir )
606 switch( (ir>>21)&0x1D ) {
607 case 0: /* Rn -= imm offset (post-indexed) [5.2.8 A5-28] */
610 LRN(ir) = addr - IMM12(ir);
612 case 4: /* Rn += imm offsett (post-indexed) [5.2.8 A5-28] */
615 LRN(ir) = addr + IMM12(ir);
617 case 8: /* Rn - imm offset [5.2.2 A5-20] */
618 addr = RN(ir) - IMM12(ir);
620 case 9: /* Rn -= imm offset (pre-indexed) [5.2.5 A5-24] */
621 addr = RN(ir) - IMM12(ir);
624 case 12: /* Rn + imm offset [5.2.2 A5-20] */
625 addr = RN(ir) + IMM12(ir);
627 case 13: /* Rn += imm offset [5.2.5 A5-24 ] */
628 addr = RN(ir) + IMM12(ir);
631 case 16: /* Rn -= Rm (post-indexed) [5.2.10 A5-32 ] */
634 LRN(ir) = addr - arm_get_address_index(ir);
636 case 20: /* Rn += Rm (post-indexed) [5.2.10 A5-32 ] */
639 LRN(ir) = addr - arm_get_address_index(ir);
641 case 24: /* Rn - Rm [5.2.4 A5-23] */
642 addr = RN(ir) - arm_get_address_index(ir);
644 case 25: /* RN -= Rm (pre-indexed) [5.2.7 A5-26] */
645 addr = RN(ir) - arm_get_address_index(ir);
648 case 28: /* Rn + Rm [5.2.4 A5-23] */
649 addr = RN(ir) + arm_get_address_index(ir);
651 case 29: /* RN += Rm (pre-indexed) [5.2.7 A5-26] */
652 addr = RN(ir) + arm_get_address_index(ir);
659 gboolean arm_execute_instruction( void )
662 uint32_t ir = MEM_READ_LONG(pc);
663 uint32_t operand, operand2, tmp, tmp2, cond;
665 tmp = armr.int_pending & (~armr.cpsr);
668 arm_raise_exception( EXC_FAST_IRQ );
670 arm_raise_exception( EXC_IRQ );
678 * Check the condition bits first - if the condition fails return
679 * immediately without actually looking at the rest of the instruction.
707 cond = armr.c && !armr.z;
710 cond = (!armr.c) || armr.z;
713 cond = (armr.n == armr.v);
716 cond = (armr.n != armr.v);
719 cond = (!armr.z) && (armr.n == armr.v);
722 cond = armr.z || (armr.n != armr.v);
735 * Condition passed, now for the actual instructions...
739 if( (ir & 0x0D900000) == 0x01000000 ) {
740 /* Instructions that aren't actual data processing even though
741 * they sit in the DP instruction block.
743 switch( ir & 0x0FF000F0 ) {
744 case 0x01200010: /* BX Rd */
746 armr.r[15] = RM(ir) & 0xFFFFFFFE;
748 case 0x01000000: /* MRS Rd, CPSR */
749 LRD(ir) = arm_get_cpsr();
751 case 0x01400000: /* MRS Rd, SPSR */
754 case 0x01200000: /* MSR CPSR, Rd */
755 arm_set_cpsr( RM(ir), ir );
757 case 0x01600000: /* MSR SPSR, Rd */
758 arm_set_spsr( RM(ir), ir );
760 case 0x03200000: /* MSR CPSR, imm */
761 arm_set_cpsr( ROTIMM12(ir), ir );
763 case 0x03600000: /* MSR SPSR, imm */
764 arm_set_spsr( ROTIMM12(ir), ir );
769 } else if( (ir & 0x0E000090) == 0x00000090 ) {
770 /* Neither are these */
772 switch( (ir>>5)&0x03 ) {
774 /* Arithmetic extension area */
777 LRN(ir) = RM(ir) * RS(ir);
780 tmp = RM(ir) * RS(ir);
786 LRN(ir) = RM(ir) * RS(ir) + RD(ir);
789 tmp = RM(ir) * RS(ir) + RD(ir);
800 case 11: /* UMLALS */
804 case 13: /* SMULLS */
808 case 15: /* SMLALS */
811 tmp = arm_read_long( RN(ir) );
812 switch( RN(ir) & 0x03 ) {
814 tmp = ROTATE_RIGHT_LONG(tmp, 8);
817 tmp = ROTATE_RIGHT_LONG(tmp, 16);
820 tmp = ROTATE_RIGHT_LONG(tmp, 24);
823 arm_write_long( RN(ir), RM(ir) );
827 tmp = arm_read_byte( RN(ir) );
828 arm_write_byte( RN(ir), RM(ir) );
859 /* Data processing */
862 case 0: /* AND Rd, Rn, operand */
863 LRD(ir) = RN(ir) & arm_get_shift_operand(ir);
865 case 1: /* ANDS Rd, Rn, operand */
866 operand = arm_get_shift_operand_s(ir) & RN(ir);
868 if( RDn(ir) == 15 ) {
871 armr.n = operand>>31;
872 armr.z = (operand == 0);
873 armr.c = armr.shift_c;
876 case 2: /* EOR Rd, Rn, operand */
877 LRD(ir) = RN(ir) ^ arm_get_shift_operand(ir);
879 case 3: /* EORS Rd, Rn, operand */
880 operand = arm_get_shift_operand_s(ir) ^ RN(ir);
882 if( RDn(ir) == 15 ) {
885 armr.n = operand>>31;
886 armr.z = (operand == 0);
887 armr.c = armr.shift_c;
890 case 4: /* SUB Rd, Rn, operand */
891 LRD(ir) = RN(ir) - arm_get_shift_operand(ir);
893 case 5: /* SUBS Rd, Rn, operand */
895 operand2 = arm_get_shift_operand(ir);
896 tmp = operand - operand2;
898 if( RDn(ir) == 15 ) {
903 armr.c = IS_NOTBORROW(tmp,operand,operand2);
904 armr.v = IS_SUBOVERFLOW(tmp,operand,operand2);
907 case 6: /* RSB Rd, operand, Rn */
908 LRD(ir) = arm_get_shift_operand(ir) - RN(ir);
910 case 7: /* RSBS Rd, operand, Rn */
911 operand = arm_get_shift_operand(ir);
913 tmp = operand - operand2;
915 if( RDn(ir) == 15 ) {
920 armr.c = IS_NOTBORROW(tmp,operand,operand2);
921 armr.v = IS_SUBOVERFLOW(tmp,operand,operand2);
924 case 8: /* ADD Rd, Rn, operand */
925 LRD(ir) = RN(ir) + arm_get_shift_operand(ir);
927 case 9: /* ADDS Rd, Rn, operand */
928 operand = arm_get_shift_operand(ir);
930 tmp = operand + operand2;
932 if( RDn(ir) == 15 ) {
937 armr.c = IS_CARRY(tmp,operand,operand2);
938 armr.v = IS_ADDOVERFLOW(tmp,operand,operand2);
942 LRD(ir) = RN(ir) + arm_get_shift_operand(ir) +
946 operand = arm_get_shift_operand(ir);
948 tmp = operand + operand2;
949 tmp2 = tmp + armr.c ? 1 : 0;
951 if( RDn(ir) == 15 ) {
955 armr.z = (tmp == 0 );
956 armr.c = IS_CARRY(tmp,operand,operand2) ||
958 armr.v = IS_ADDOVERFLOW(tmp,operand, operand2) ||
959 ((tmp&0x80000000) != (tmp2&0x80000000));
963 LRD(ir) = RN(ir) - arm_get_shift_operand(ir) -
968 operand2 = arm_get_shift_operand(ir);
969 tmp = operand - operand2;
970 tmp2 = tmp - (armr.c ? 0 : 1);
971 if( RDn(ir) == 15 ) {
975 armr.z = (tmp == 0 );
976 armr.c = IS_NOTBORROW(tmp,operand,operand2) &&
978 armr.v = IS_SUBOVERFLOW(tmp,operand,operand2) ||
979 ((tmp&0x80000000) != (tmp2&0x80000000));
983 LRD(ir) = arm_get_shift_operand(ir) - RN(ir) -
987 operand = arm_get_shift_operand(ir);
989 tmp = operand - operand2;
990 tmp2 = tmp - (armr.c ? 0 : 1);
991 if( RDn(ir) == 15 ) {
995 armr.z = (tmp == 0 );
996 armr.c = IS_NOTBORROW(tmp,operand,operand2) &&
998 armr.v = IS_SUBOVERFLOW(tmp,operand,operand2) ||
999 ((tmp&0x80000000) != (tmp2&0x80000000));
1002 case 17: /* TST Rn, operand */
1003 operand = arm_get_shift_operand_s(ir) & RN(ir);
1004 armr.n = operand>>31;
1005 armr.z = (operand == 0);
1006 armr.c = armr.shift_c;
1008 case 19: /* TEQ Rn, operand */
1009 operand = arm_get_shift_operand_s(ir) ^ RN(ir);
1010 armr.n = operand>>31;
1011 armr.z = (operand == 0);
1012 armr.c = armr.shift_c;
1014 case 21: /* CMP Rn, operand */
1016 operand2 = arm_get_shift_operand(ir);
1017 tmp = operand - operand2;
1019 armr.z = (tmp == 0);
1020 armr.c = IS_NOTBORROW(tmp,operand,operand2);
1021 armr.v = IS_SUBOVERFLOW(tmp,operand,operand2);
1023 case 23: /* CMN Rn, operand */
1025 operand2 = arm_get_shift_operand(ir);
1026 tmp = operand + operand2;
1028 armr.z = (tmp == 0);
1029 armr.c = IS_CARRY(tmp,operand,operand2);
1030 armr.v = IS_ADDOVERFLOW(tmp,operand,operand2);
1032 case 24: /* ORR Rd, Rn, operand */
1033 LRD(ir) = RN(ir) | arm_get_shift_operand(ir);
1035 case 25: /* ORRS Rd, Rn, operand */
1036 operand = arm_get_shift_operand_s(ir) | RN(ir);
1038 if( RDn(ir) == 15 ) {
1041 armr.n = operand>>31;
1042 armr.z = (operand == 0);
1043 armr.c = armr.shift_c;
1046 case 26: /* MOV Rd, operand */
1047 LRD(ir) = arm_get_shift_operand(ir);
1049 case 27: /* MOVS Rd, operand */
1050 operand = arm_get_shift_operand_s(ir);
1052 if( RDn(ir) == 15 ) {
1055 armr.n = operand>>31;
1056 armr.z = (operand == 0);
1057 armr.c = armr.shift_c;
1060 case 28: /* BIC Rd, Rn, operand */
1061 LRD(ir) = RN(ir) & (~arm_get_shift_operand(ir));
1063 case 29: /* BICS Rd, Rn, operand */
1064 operand = RN(ir) & (~arm_get_shift_operand_s(ir));
1066 if( RDn(ir) == 15 ) {
1069 armr.n = operand>>31;
1070 armr.z = (operand == 0);
1071 armr.c = armr.shift_c;
1074 case 30: /* MVN Rd, operand */
1075 LRD(ir) = ~arm_get_shift_operand(ir);
1077 case 31: /* MVNS Rd, operand */
1078 operand = ~arm_get_shift_operand_s(ir);
1080 if( RDn(ir) == 15 ) {
1083 armr.n = operand>>31;
1084 armr.z = (operand == 0);
1085 armr.c = armr.shift_c;
1093 case 1: /* Load/store */
1094 operand = arm_get_address_operand(ir);
1095 switch( (ir>>20)&0x17 ) {
1096 case 0: case 16: case 18: /* STR Rd, address */
1097 arm_write_long( operand, RD(ir) );
1099 case 1: case 17: case 19: /* LDR Rd, address */
1100 LRD(ir) = arm_read_long(operand);
1102 case 2: /* STRT Rd, address */
1103 arm_write_long_user( operand, RD(ir) );
1105 case 3: /* LDRT Rd, address */
1106 LRD(ir) = arm_read_long_user( operand );
1108 case 4: case 20: case 22: /* STRB Rd, address */
1109 arm_write_byte( operand, RD(ir) );
1111 case 5: case 21: case 23: /* LDRB Rd, address */
1112 LRD(ir) = arm_read_byte( operand );
1114 case 6: /* STRBT Rd, address */
1115 arm_write_byte_user( operand, RD(ir) );
1117 case 7: /* LDRBT Rd, address */
1118 LRD(ir) = arm_read_byte_user( operand );
1122 case 2: /* Load/store multiple, branch*/
1123 if( (ir & 0x02000000) == 0x02000000 ) { /* B[L] imm24 */
1124 operand = (SIGNEXT24(ir&0x00FFFFFF) << 2);
1125 if( (ir & 0x01000000) == 0x01000000 ) {
1126 armr.r[14] = pc; /* BL */
1128 armr.r[15] = pc + 4 + operand;
1129 } else { /* Load/store multiple */
1130 int prestep, poststep;
1133 poststep = UFLAG(ir) ? 4 : -4;
1135 prestep = UFLAG(ir) ? 4 : -4;
1140 /* Actually S - bit 22. Means "make massively complicated" */
1141 if( LFLAG(ir) && (ir&0x00008000) ) {
1142 /* LDM (3). Much like normal LDM but also copies SPSR
1144 for( tmp=0; tmp < 16; tmp++ ) {
1145 if( (ir & (1<<tmp)) ) {
1147 armr.r[tmp] = arm_read_long(operand);
1148 operand += poststep;
1152 if( armr.t ) PC &= 0xFFFFFFFE;
1153 else PC &= 0xFFFFFFFC;
1155 /* LDM/STM (2). As normal LDM but accesses the User banks
1156 * instead of the active ones. Aka the truly evil case
1161 else if( IS_EXCEPTION_MODE() )
1163 else bank_start = 15;
1164 for( tmp=0; tmp<bank_start; tmp++ ) {
1165 if( (ir & (1<<tmp)) ) {
1168 armr.r[tmp] = arm_read_long(operand);
1170 arm_write_long( operand, armr.r[tmp] );
1172 operand += poststep;
1175 for( ; tmp < 15; tmp ++ ) {
1176 if( (ir & (1<<tmp)) ) {
1179 armr.user_r[tmp-8] = arm_read_long(operand);
1181 arm_write_long( operand, armr.user_r[tmp-8] );
1183 operand += poststep;
1189 /* Actually can't happen, but anyway... */
1190 armr.r[15] = arm_read_long(operand);
1192 arm_write_long( operand, armr.r[15]+ STM_R15_OFFSET - 4 );
1194 operand += poststep;
1198 /* Normal LDM/STM */
1199 for( tmp=0; tmp < 16; tmp++ ) {
1200 if( (ir & (1<<tmp)) ) {
1203 armr.r[tmp] = arm_read_long(operand);
1206 arm_write_long( operand,
1207 armr.r[15] + STM_R15_OFFSET - 4 );
1209 arm_write_long( operand, armr.r[tmp] );
1211 operand += poststep;
1220 if( (ir & 0x0F000000) == 0x0F000000 ) { /* SWI */
1221 arm_raise_exception( EXC_SOFTWARE );
.