filename | src/aica/armcore.c |
changeset | 11:0a82ef380c45 |
prev | 7:976a16e92aab |
next | 30:89b30313d757 |
author | nkeynes |
date | Sun Dec 11 12:00:09 2005 +0000 (18 years ago) |
permissions | -rw-r--r-- |
last change | Moved arm material under aica/ Hooked arm disasm up |
view | annotate | diff | log | raw |
2 #include "aica/armcore.h"
/* Global ARM CPU state (register file + flags; type declared in aica/armcore.h) */
struct arm_registers armr;
/* NB: The arm has a different memory map, but for the meantime... */
/* Page references are as per ARM DDI 0100E (June 2000) */

#define MEM_READ_BYTE( addr ) arm_read_byte(addr)
#define MEM_READ_WORD( addr ) arm_read_word(addr)
#define MEM_READ_LONG( addr ) arm_read_long(addr)
#define MEM_WRITE_BYTE( addr, val ) arm_write_byte(addr, val)
#define MEM_WRITE_WORD( addr, val ) arm_write_word(addr, val)
#define MEM_WRITE_LONG( addr, val ) arm_write_long(addr, val)

/* Flag helpers for ADD/SUB-style operations.
 * NOTBORROW is the ARM C-flag convention for subtraction: set when no
 * borrow occurred, i.e. op1 >= op2.  CARRY is unsigned wrap on addition.
 */
#define IS_NOTBORROW( result, op1, op2 ) (op2 > op1 ? 0 : 1)
#define IS_CARRY( result, op1, op2 ) (result < op1 ? 1 : 0)
#define IS_SUBOVERFLOW( result, op1, op2 ) (((op1^op2) & (result^op1)) >> 31)
/* Signed overflow on addition: operands have the same sign and the result
 * has the opposite sign.  (Was ((op1&op2) & ...), which only caught the
 * negative+negative case and missed positive+positive overflow.) */
#define IS_ADDOVERFLOW( result, op1, op2 ) ((~(op1^op2) & (result^op1)) >> 31)

#define PC armr.r[15]

/* Instruction fields */
#define COND(ir) (ir>>28)
#define GRP(ir) ((ir>>26)&0x03)
#define OPCODE(ir) ((ir>>20)&0x1F)
#define IFLAG(ir) (ir&0x02000000)
#define SFLAG(ir) (ir&0x00100000)
#define PFLAG(ir) (ir&0x01000000)
#define UFLAG(ir) (ir&0x00800000)
#define BFLAG(ir) (ir&0x00400000)
#define WFLAG(ir) (ir&0x00200000)   /* was (IR&...) - wrong identifier, never expanded */
#define LFLAG(ir) SFLAG(ir)
/* Register reads: R15 (the PC) must read as instruction address + 8.  PC has
 * already been advanced by 4 when these are evaluated, hence the extra +4.
 * Each macro must test its OWN register field for 15 - RD/RS/RM previously
 * tested the Rn field ((ir>>16)&0x0F) by copy-paste error. */
#define RN(ir) (armr.r[((ir>>16)&0x0F)] + (((ir>>16)&0x0F) == 0x0F ? 4 : 0))
#define RD(ir) (armr.r[((ir>>12)&0x0F)] + (((ir>>12)&0x0F) == 0x0F ? 4 : 0))
#define RDn(ir) ((ir>>12)&0x0F)
#define RS(ir) (armr.r[((ir>>8)&0x0F)] + (((ir>>8)&0x0F) == 0x0F ? 4 : 0))
#define RM(ir) (armr.r[(ir&0x0F)] + (((ir&0x0F) == 0x0F ? 4 : 0)))
/* L-value versions of the above (no PC adjustment) */
#define LRN(ir) armr.r[((ir>>16)&0x0F)]
#define LRD(ir) armr.r[((ir>>12)&0x0F)]
#define LRS(ir) armr.r[((ir>>8)&0x0F)]
#define LRM(ir) armr.r[(ir&0x0F)]

#define IMM8(ir) (ir&0xFF)
#define IMM12(ir) (ir&0xFFF)
#define SHIFTIMM(ir) ((ir>>7)&0x1F)
#define IMMROT(ir) ((ir>>7)&0x1E)
#define SHIFT(ir) ((ir>>4)&0x07)
#define DISP24(ir) ((ir&0x00FFFFFF))
#define UNDEF(ir) do{ ERROR( "Raising exception on undefined instruction at %08x, opcode = %04x", PC, ir ); return; } while(0)
#define UNIMP(ir) do{ ERROR( "Halted on unimplemented instruction at %08x, opcode = %04x", PC, ir ); return; }while(0)
/**
 * Restore CPSR from the current mode's saved status register (used when an
 * S-flagged data-processing op writes R15).
 * TODO: not yet implemented - currently a no-op stub.
 */
void arm_restore_cpsr()
{
    /* unimplemented */
}
59 static uint32_t arm_get_shift_operand( uint32_t ir )
60 {
61 uint32_t operand, tmp;
62 if( IFLAG(ir) == 0 ) {
63 operand = RM(ir);
64 switch(SHIFT(ir)) {
65 case 0: /* (Rm << imm) */
66 operand = operand << SHIFTIMM(ir);
67 break;
68 case 1: /* (Rm << Rs) */
69 tmp = RS(ir)&0xFF;
70 if( tmp > 31 ) operand = 0;
71 else operand = operand << tmp;
72 break;
73 case 2: /* (Rm >> imm) */
74 operand = operand >> SHIFTIMM(ir);
75 break;
76 case 3: /* (Rm >> Rs) */
77 tmp = RS(ir) & 0xFF;
78 if( tmp > 31 ) operand = 0;
79 else operand = operand >> ir;
80 break;
81 case 4: /* (Rm >>> imm) */
82 tmp = SHIFTIMM(ir);
83 if( tmp == 0 ) operand = ((int32_t)operand) >> 31;
84 else operand = ((int32_t)operand) >> tmp;
85 break;
86 case 5: /* (Rm >>> Rs) */
87 tmp = RS(ir) & 0xFF;
88 if( tmp > 31 ) operand = ((int32_t)operand) >> 31;
89 else operand = ((int32_t)operand) >> tmp;
90 break;
91 case 6:
92 tmp = SHIFTIMM(ir);
93 if( tmp == 0 ) /* RRX aka rotate with carry */
94 operand = (operand >> 1) | (armr.c<<31);
95 else
96 operand = ROTATE_RIGHT_LONG(operand,tmp);
97 break;
98 case 7:
99 tmp = RS(ir)&0x1F;
100 operand = ROTATE_RIGHT_LONG(operand,tmp);
101 break;
102 }
103 } else {
104 operand = IMM8(ir);
105 tmp = IMMROT(ir);
106 operand = ROTATE_RIGHT_LONG(operand, tmp);
107 }
108 return operand;
109 }
111 /**
112 * Compute the "shift operand" of the instruction for the data processing
113 * instructions. This variant also sets armr.shift_c (carry result for shifter)
114 * Reason for the variants is that most cases don't actually need the shift_c.
115 */
116 static uint32_t arm_get_shift_operand_s( uint32_t ir )
117 {
118 uint32_t operand, tmp;
119 if( IFLAG(ir) == 0 ) {
120 operand = RM(ir);
121 switch(SHIFT(ir)) {
122 case 0: /* (Rm << imm) */
123 tmp = SHIFTIMM(ir);
124 if( tmp == 0 ) { /* Rm */
125 armr.shift_c = armr.c;
126 } else { /* Rm << imm */
127 armr.shift_c = (operand >> (32-tmp)) & 0x01;
128 operand = operand << tmp;
129 }
130 break;
131 case 1: /* (Rm << Rs) */
132 tmp = RS(ir)&0xFF;
133 if( tmp == 0 ) {
134 armr.shift_c = armr.c;
135 } else {
136 if( tmp <= 32 )
137 armr.shift_c = (operand >> (32-tmp)) & 0x01;
138 else armr.shift_c = 0;
139 if( tmp < 32 )
140 operand = operand << tmp;
141 else operand = 0;
142 }
143 break;
144 case 2: /* (Rm >> imm) */
145 tmp = SHIFTIMM(ir);
146 if( tmp == 0 ) {
147 armr.shift_c = operand >> 31;
148 operand = 0;
149 } else {
150 armr.shift_c = (operand >> (tmp-1)) & 0x01;
151 operand = RM(ir) >> tmp;
152 }
153 break;
154 case 3: /* (Rm >> Rs) */
155 tmp = RS(ir) & 0xFF;
156 if( tmp == 0 ) {
157 armr.shift_c = armr.c;
158 } else {
159 if( tmp <= 32 )
160 armr.shift_c = (operand >> (tmp-1))&0x01;
161 else armr.shift_c = 0;
162 if( tmp < 32 )
163 operand = operand >> tmp;
164 else operand = 0;
165 }
166 break;
167 case 4: /* (Rm >>> imm) */
168 tmp = SHIFTIMM(ir);
169 if( tmp == 0 ) {
170 armr.shift_c = operand >> 31;
171 operand = -armr.shift_c;
172 } else {
173 armr.shift_c = (operand >> (tmp-1)) & 0x01;
174 operand = ((int32_t)operand) >> tmp;
175 }
176 break;
177 case 5: /* (Rm >>> Rs) */
178 tmp = RS(ir) & 0xFF;
179 if( tmp == 0 ) {
180 armr.shift_c = armr.c;
181 } else {
182 if( tmp < 32 ) {
183 armr.shift_c = (operand >> (tmp-1))&0x01;
184 operand = ((int32_t)operand) >> tmp;
185 } else {
186 armr.shift_c = operand >> 31;
187 operand = ((int32_t)operand) >> 31;
188 }
189 }
190 break;
191 case 6:
192 tmp = SHIFTIMM(ir);
193 if( tmp == 0 ) { /* RRX aka rotate with carry */
194 armr.shift_c = operand&0x01;
195 operand = (operand >> 1) | (armr.c<<31);
196 } else {
197 armr.shift_c = operand>>(tmp-1);
198 operand = ROTATE_RIGHT_LONG(operand,tmp);
199 }
200 break;
201 case 7:
202 tmp = RS(ir)&0xFF;
203 if( tmp == 0 ) {
204 armr.shift_c = armr.c;
205 } else {
206 tmp &= 0x1F;
207 if( tmp == 0 ) {
208 armr.shift_c = operand>>31;
209 } else {
210 armr.shift_c = (operand>>(tmp-1))&0x1;
211 operand = ROTATE_RIGHT_LONG(operand,tmp);
212 }
213 }
214 break;
215 }
216 } else {
217 operand = IMM8(ir);
218 tmp = IMMROT(ir);
219 if( tmp == 0 ) {
220 armr.shift_c = armr.c;
221 } else {
222 operand = ROTATE_RIGHT_LONG(operand, tmp);
223 armr.shift_c = operand>>31;
224 }
225 }
226 return operand;
227 }
229 /**
230 * Another variant of the shifter code for index-based memory addressing.
231 * Distinguished by the fact that it doesn't support register shifts, and
232 * ignores the I flag (WTF do the load/store instructions use the I flag to
233 * mean the _exact opposite_ of what it means for the data processing
234 * instructions ???)
235 */
236 static uint32_t arm_get_address_index( uint32_t ir )
237 {
238 uint32_t operand = RM(ir);
239 uint32_t tmp;
241 switch(SHIFT(ir)) {
242 case 0: /* (Rm << imm) */
243 operand = operand << SHIFTIMM(ir);
244 break;
245 case 2: /* (Rm >> imm) */
246 operand = operand >> SHIFTIMM(ir);
247 break;
248 case 4: /* (Rm >>> imm) */
249 tmp = SHIFTIMM(ir);
250 if( tmp == 0 ) operand = ((int32_t)operand) >> 31;
251 else operand = ((int32_t)operand) >> tmp;
252 break;
253 case 6:
254 tmp = SHIFTIMM(ir);
255 if( tmp == 0 ) /* RRX aka rotate with carry */
256 operand = (operand >> 1) | (armr.c<<31);
257 else
258 operand = ROTATE_RIGHT_LONG(operand,tmp);
259 break;
260 default: UNIMP(ir);
261 }
262 return operand;
263 }
265 static uint32_t arm_get_address_operand( uint32_t ir )
266 {
267 uint32_t addr;
269 /* I P U . W */
270 switch( (ir>>21)&0x1D ) {
271 case 0: /* Rn -= imm offset (post-indexed) [5.2.8 A5-28] */
272 case 1:
273 addr = RN(ir);
274 LRN(ir) = addr - IMM12(ir);
275 break;
276 case 4: /* Rn += imm offsett (post-indexed) [5.2.8 A5-28] */
277 case 5:
278 addr = RN(ir);
279 LRN(ir) = addr + IMM12(ir);
280 break;
281 case 8: /* Rn - imm offset [5.2.2 A5-20] */
282 addr = RN(ir) - IMM12(ir);
283 break;
284 case 9: /* Rn -= imm offset (pre-indexed) [5.2.5 A5-24] */
285 addr = RN(ir) - IMM12(ir);
286 LRN(ir) = addr;
287 break;
288 case 12: /* Rn + imm offset [5.2.2 A5-20] */
289 addr = RN(ir) + IMM12(ir);
290 break;
291 case 13: /* Rn += imm offset [5.2.5 A5-24 ] */
292 addr = RN(ir) + IMM12(ir);
293 LRN(ir) = addr;
294 break;
295 case 16: /* Rn -= Rm (post-indexed) [5.2.10 A5-32 ] */
296 case 17:
297 addr = RN(ir);
298 LRN(ir) = addr - arm_get_address_index(ir);
299 break;
300 case 20: /* Rn += Rm (post-indexed) [5.2.10 A5-32 ] */
301 case 21:
302 addr = RN(ir);
303 LRN(ir) = addr - arm_get_address_index(ir);
304 break;
305 case 24: /* Rn - Rm [5.2.4 A5-23] */
306 addr = RN(ir) - arm_get_address_index(ir);
307 break;
308 case 25: /* RN -= Rm (pre-indexed) [5.2.7 A5-26] */
309 addr = RN(ir) - arm_get_address_index(ir);
310 LRN(ir) = addr;
311 break;
312 case 28: /* Rn + Rm [5.2.4 A5-23] */
313 addr = RN(ir) + arm_get_address_index(ir);
314 break;
315 case 29: /* RN += Rm (pre-indexed) [5.2.7 A5-26] */
316 addr = RN(ir) + arm_get_address_index(ir);
317 LRN(ir) = addr;
318 break;
319 default:
320 UNIMP(ir); /* Unreachable */
321 }
322 return addr;
323 }
325 void arm_execute_instruction( void )
326 {
327 uint32_t pc = PC;
328 uint32_t ir = MEM_READ_LONG(pc);
329 uint32_t operand, operand2, tmp, cond;
331 pc += 4;
332 PC = pc;
334 switch( COND(ir) ) {
335 case 0: /* EQ */
336 cond = armr.z;
337 break;
338 case 1: /* NE */
339 cond = !armr.z;
340 break;
341 case 2: /* CS/HS */
342 cond = armr.c;
343 break;
344 case 3: /* CC/LO */
345 cond = !armr.c;
346 break;
347 case 4: /* MI */
348 cond = armr.n;
349 break;
350 case 5: /* PL */
351 cond = !armr.n;
352 break;
353 case 6: /* VS */
354 cond = armr.v;
355 break;
356 case 7: /* VC */
357 cond = !armr.v;
358 break;
359 case 8: /* HI */
360 cond = armr.c && !armr.z;
361 break;
362 case 9: /* LS */
363 cond = (!armr.c) || armr.z;
364 break;
365 case 10: /* GE */
366 cond = (armr.n == armr.v);
367 break;
368 case 11: /* LT */
369 cond = (armr.n != armr.v);
370 break;
371 case 12: /* GT */
372 cond = (!armr.z) && (armr.n == armr.v);
373 break;
374 case 13: /* LE */
375 cond = armr.z || (armr.n != armr.v);
376 break;
377 case 14: /* AL */
378 cond = 1;
379 break;
380 case 15: /* (NV) */
381 cond = 0;
382 UNDEF(ir);
383 }
385 switch( GRP(ir) ) {
386 case 0:
387 if( (ir & 0x0D900000) == 0x01000000 ) {
388 /* Instructions that aren't actual data processing */
389 switch( ir & 0x0FF000F0 ) {
390 case 0x01200010: /* BX */
391 break;
392 case 0x01000000: /* MRS Rd, CPSR */
393 break;
394 case 0x01400000: /* MRS Rd, SPSR */
395 break;
396 case 0x01200000: /* MSR CPSR, Rd */
397 break;
398 case 0x01600000: /* MSR SPSR, Rd */
399 break;
400 case 0x03200000: /* MSR CPSR, imm */
401 break;
402 case 0x03600000: /* MSR SPSR, imm */
403 break;
404 default:
405 UNIMP(ir);
406 }
407 } else if( (ir & 0x0E000090) == 0x00000090 ) {
408 /* Neither are these */
409 switch( (ir>>5)&0x03 ) {
410 case 0:
411 /* Arithmetic extension area */
412 switch(OPCODE(ir)) {
413 case 0: /* MUL */
414 break;
415 case 1: /* MULS */
416 break;
417 case 2: /* MLA */
418 break;
419 case 3: /* MLAS */
420 break;
421 case 8: /* UMULL */
422 break;
423 case 9: /* UMULLS */
424 break;
425 case 10: /* UMLAL */
426 break;
427 case 11: /* UMLALS */
428 break;
429 case 12: /* SMULL */
430 break;
431 case 13: /* SMULLS */
432 break;
433 case 14: /* SMLAL */
434 break;
435 case 15: /* SMLALS */
436 break;
437 case 16: /* SWP */
438 break;
439 case 20: /* SWPB */
440 break;
441 default:
442 UNIMP(ir);
443 }
444 break;
445 case 1:
446 if( LFLAG(ir) ) {
447 /* LDRH */
448 } else {
449 /* STRH */
450 }
451 break;
452 case 2:
453 if( LFLAG(ir) ) {
454 /* LDRSB */
455 } else {
456 UNIMP(ir);
457 }
458 break;
459 case 3:
460 if( LFLAG(ir) ) {
461 /* LDRSH */
462 } else {
463 UNIMP(ir);
464 }
465 break;
466 }
467 } else {
468 /* Data processing */
470 switch(OPCODE(ir)) {
471 case 0: /* AND Rd, Rn, operand */
472 LRD(ir) = RN(ir) & arm_get_shift_operand(ir);
473 break;
474 case 1: /* ANDS Rd, Rn, operand */
475 operand = arm_get_shift_operand_s(ir) & RN(ir);
476 LRD(ir) = operand;
477 if( RDn(ir) == 15 ) {
478 arm_restore_cpsr();
479 } else {
480 armr.n = operand>>31;
481 armr.z = (operand == 0);
482 armr.c = armr.shift_c;
483 }
484 break;
485 case 2: /* EOR Rd, Rn, operand */
486 LRD(ir) = RN(ir) ^ arm_get_shift_operand(ir);
487 break;
488 case 3: /* EORS Rd, Rn, operand */
489 operand = arm_get_shift_operand_s(ir) ^ RN(ir);
490 LRD(ir) = operand;
491 if( RDn(ir) == 15 ) {
492 arm_restore_cpsr();
493 } else {
494 armr.n = operand>>31;
495 armr.z = (operand == 0);
496 armr.c = armr.shift_c;
497 }
498 break;
499 case 4: /* SUB Rd, Rn, operand */
500 LRD(ir) = RN(ir) - arm_get_shift_operand(ir);
501 break;
502 case 5: /* SUBS Rd, Rn, operand */
503 operand = RN(ir);
504 operand2 = arm_get_shift_operand(ir);
505 tmp = operand - operand2;
506 LRD(ir) = tmp;
507 if( RDn(ir) == 15 ) {
508 arm_restore_cpsr();
509 } else {
510 armr.n = tmp>>31;
511 armr.z = (tmp == 0);
512 armr.c = IS_NOTBORROW(tmp,operand,operand2);
513 armr.v = IS_SUBOVERFLOW(tmp,operand,operand2);
514 }
515 break;
516 case 6: /* RSB Rd, operand, Rn */
517 LRD(ir) = arm_get_shift_operand(ir) - RN(ir);
518 break;
519 case 7: /* RSBS Rd, operand, Rn */
520 operand = arm_get_shift_operand(ir);
521 operand2 = RN(ir);
522 tmp = operand - operand2;
523 LRD(ir) = tmp;
524 if( RDn(ir) == 15 ) {
525 arm_restore_cpsr();
526 } else {
527 armr.n = tmp>>31;
528 armr.z = (tmp == 0);
529 armr.c = IS_NOTBORROW(tmp,operand,operand2);
530 armr.v = IS_SUBOVERFLOW(tmp,operand,operand2);
531 }
532 break;
533 case 8: /* ADD Rd, Rn, operand */
534 LRD(ir) = RN(ir) + arm_get_shift_operand(ir);
535 break;
536 case 9: /* ADDS Rd, Rn, operand */
537 operand = arm_get_shift_operand(ir);
538 operand2 = RN(ir);
539 tmp = operand + operand2;
540 LRD(ir) = tmp;
541 if( RDn(ir) == 15 ) {
542 arm_restore_cpsr();
543 } else {
544 armr.n = tmp>>31;
545 armr.z = (tmp == 0);
546 armr.c = IS_CARRY(tmp,operand,operand2);
547 armr.v = IS_ADDOVERFLOW(tmp,operand,operand2);
548 }
549 break;
550 case 10: /* ADC */
551 case 11: /* ADCS */
552 case 12: /* SBC */
553 case 13: /* SBCS */
554 case 14: /* RSC */
555 case 15: /* RSCS */
556 break;
557 case 17: /* TST Rn, operand */
558 operand = arm_get_shift_operand_s(ir) & RN(ir);
559 armr.n = operand>>31;
560 armr.z = (operand == 0);
561 armr.c = armr.shift_c;
562 break;
563 case 19: /* TEQ Rn, operand */
564 operand = arm_get_shift_operand_s(ir) ^ RN(ir);
565 armr.n = operand>>31;
566 armr.z = (operand == 0);
567 armr.c = armr.shift_c;
568 break;
569 case 21: /* CMP Rn, operand */
570 operand = RN(ir);
571 operand2 = arm_get_shift_operand(ir);
572 tmp = operand - operand2;
573 armr.n = tmp>>31;
574 armr.z = (tmp == 0);
575 armr.c = IS_NOTBORROW(tmp,operand,operand2);
576 armr.v = IS_SUBOVERFLOW(tmp,operand,operand2);
577 break;
578 case 23: /* CMN Rn, operand */
579 operand = RN(ir);
580 operand2 = arm_get_shift_operand(ir);
581 tmp = operand + operand2;
582 armr.n = tmp>>31;
583 armr.z = (tmp == 0);
584 armr.c = IS_CARRY(tmp,operand,operand2);
585 armr.v = IS_ADDOVERFLOW(tmp,operand,operand2);
586 break;
587 case 24: /* ORR Rd, Rn, operand */
588 LRD(ir) = RN(ir) | arm_get_shift_operand(ir);
589 break;
590 case 25: /* ORRS Rd, Rn, operand */
591 operand = arm_get_shift_operand_s(ir) | RN(ir);
592 LRD(ir) = operand;
593 if( RDn(ir) == 15 ) {
594 arm_restore_cpsr();
595 } else {
596 armr.n = operand>>31;
597 armr.z = (operand == 0);
598 armr.c = armr.shift_c;
599 }
600 break;
601 case 26: /* MOV Rd, operand */
602 LRD(ir) = arm_get_shift_operand(ir);
603 break;
604 case 27: /* MOVS Rd, operand */
605 operand = arm_get_shift_operand_s(ir);
606 LRD(ir) = operand;
607 if( RDn(ir) == 15 ) {
608 arm_restore_cpsr();
609 } else {
610 armr.n = operand>>31;
611 armr.z = (operand == 0);
612 armr.c = armr.shift_c;
613 }
614 break;
615 case 28: /* BIC Rd, Rn, operand */
616 LRD(ir) = RN(ir) & (~arm_get_shift_operand(ir));
617 break;
618 case 29: /* BICS Rd, Rn, operand */
619 operand = RN(ir) & (~arm_get_shift_operand_s(ir));
620 LRD(ir) = operand;
621 if( RDn(ir) == 15 ) {
622 arm_restore_cpsr();
623 } else {
624 armr.n = operand>>31;
625 armr.z = (operand == 0);
626 armr.c = armr.shift_c;
627 }
628 break;
629 case 30: /* MVN Rd, operand */
630 LRD(ir) = ~arm_get_shift_operand(ir);
631 break;
632 case 31: /* MVNS Rd, operand */
633 operand = ~arm_get_shift_operand_s(ir);
634 LRD(ir) = operand;
635 if( RDn(ir) == 15 ) {
636 arm_restore_cpsr();
637 } else {
638 armr.n = operand>>31;
639 armr.z = (operand == 0);
640 armr.c = armr.shift_c;
641 }
642 break;
643 default:
644 UNIMP(ir);
645 }
646 }
647 break;
648 case 1: /* Load/store */
649 break;
650 case 2: /* Load/store multiple, branch*/
651 break;
652 case 3: /* Copro */
653 break;
654 }
655 }
.