4 * SH4 parent module for all CPU modes and SH4 peripheral
7 * Copyright (c) 2005 Nathan Keynes.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
20 #define MODULE sh4_module
25 #include "dreamcast.h"
33 #include "sh4/sh4core.h"
34 #include "sh4/sh4dasm.h"
35 #include "sh4/sh4mmio.h"
36 #include "sh4/sh4stat.h"
37 #include "sh4/sh4trans.h"
38 #include "xlat/xltcache.h"
41 #define M_PI 3.14159265358979323846264338327950288
44 void sh4_init( void );
45 void sh4_poweron_reset( void );
46 void sh4_start( void );
47 void sh4_stop( void );
48 void sh4_save_state( FILE *f );
49 int sh4_load_state( FILE *f );
50 size_t sh4_debug_read_phys( unsigned char *buf, uint32_t addr, size_t length );
51 size_t sh4_debug_write_phys( uint32_t addr, unsigned char *buf, size_t length );
52 size_t sh4_debug_read_vma( unsigned char *buf, uint32_t addr, size_t length );
53 size_t sh4_debug_write_vma( uint32_t addr, unsigned char *buf, size_t length );
55 uint32_t sh4_run_slice( uint32_t );
57 /* Note: this must match GDB's ordering */
58 const struct reg_desc_struct sh4_reg_map[] =
59 { {"R0", REG_TYPE_INT, &sh4r.r[0]}, {"R1", REG_TYPE_INT, &sh4r.r[1]},
60 {"R2", REG_TYPE_INT, &sh4r.r[2]}, {"R3", REG_TYPE_INT, &sh4r.r[3]},
61 {"R4", REG_TYPE_INT, &sh4r.r[4]}, {"R5", REG_TYPE_INT, &sh4r.r[5]},
62 {"R6", REG_TYPE_INT, &sh4r.r[6]}, {"R7", REG_TYPE_INT, &sh4r.r[7]},
63 {"R8", REG_TYPE_INT, &sh4r.r[8]}, {"R9", REG_TYPE_INT, &sh4r.r[9]},
64 {"R10",REG_TYPE_INT, &sh4r.r[10]}, {"R11",REG_TYPE_INT, &sh4r.r[11]},
65 {"R12",REG_TYPE_INT, &sh4r.r[12]}, {"R13",REG_TYPE_INT, &sh4r.r[13]},
66 {"R14",REG_TYPE_INT, &sh4r.r[14]}, {"R15",REG_TYPE_INT, &sh4r.r[15]},
67 {"PC", REG_TYPE_INT, &sh4r.pc}, {"PR", REG_TYPE_INT, &sh4r.pr},
68 {"GBR", REG_TYPE_INT, &sh4r.gbr}, {"VBR",REG_TYPE_INT, &sh4r.vbr},
69 {"MACH",REG_TYPE_INT, ((uint32_t *)&sh4r.mac)+1}, {"MACL",REG_TYPE_INT, &sh4r.mac},
70 {"SR", REG_TYPE_INT, &sh4r.sr},
71 {"FPUL", REG_TYPE_INT, &sh4r.fpul.i}, {"FPSCR", REG_TYPE_INT, &sh4r.fpscr},
73 {"FR0", REG_TYPE_FLOAT, &sh4r.fr[0][1] },{"FR1", REG_TYPE_FLOAT, &sh4r.fr[0][0]},
74 {"FR2", REG_TYPE_FLOAT, &sh4r.fr[0][3] },{"FR3", REG_TYPE_FLOAT, &sh4r.fr[0][2]},
75 {"FR4", REG_TYPE_FLOAT, &sh4r.fr[0][5] },{"FR5", REG_TYPE_FLOAT, &sh4r.fr[0][4]},
76 {"FR6", REG_TYPE_FLOAT, &sh4r.fr[0][7] },{"FR7", REG_TYPE_FLOAT, &sh4r.fr[0][6]},
77 {"FR8", REG_TYPE_FLOAT, &sh4r.fr[0][9] },{"FR9", REG_TYPE_FLOAT, &sh4r.fr[0][8]},
78 {"FR10", REG_TYPE_FLOAT, &sh4r.fr[0][11] },{"FR11", REG_TYPE_FLOAT, &sh4r.fr[0][10]},
79 {"FR12", REG_TYPE_FLOAT, &sh4r.fr[0][13] },{"FR13", REG_TYPE_FLOAT, &sh4r.fr[0][12]},
80 {"FR14", REG_TYPE_FLOAT, &sh4r.fr[0][15] },{"FR15", REG_TYPE_FLOAT, &sh4r.fr[0][14]},
82 {"SSR",REG_TYPE_INT, &sh4r.ssr}, {"SPC", REG_TYPE_INT, &sh4r.spc},
84 {"R0B0", REG_TYPE_INT, NULL}, {"R1B0", REG_TYPE_INT, NULL},
85 {"R2B0", REG_TYPE_INT, NULL}, {"R3B0", REG_TYPE_INT, NULL},
86 {"R4B0", REG_TYPE_INT, NULL}, {"R5B0", REG_TYPE_INT, NULL},
87 {"R6B0", REG_TYPE_INT, NULL}, {"R7B0", REG_TYPE_INT, NULL},
88 {"R0B1", REG_TYPE_INT, NULL}, {"R1B1", REG_TYPE_INT, NULL},
89 {"R2B1", REG_TYPE_INT, NULL}, {"R3B1", REG_TYPE_INT, NULL},
90 {"R4B1", REG_TYPE_INT, NULL}, {"R5B1", REG_TYPE_INT, NULL},
91 {"R6B1", REG_TYPE_INT, NULL}, {"R7B1", REG_TYPE_INT, NULL},
93 {"SGR",REG_TYPE_INT, &sh4r.sgr}, {"DBR", REG_TYPE_INT, &sh4r.dbr},
95 {"XF0", REG_TYPE_FLOAT, &sh4r.fr[1][1] },{"XF1", REG_TYPE_FLOAT, &sh4r.fr[1][0]},
96 {"XF2", REG_TYPE_FLOAT, &sh4r.fr[1][3] },{"XF3", REG_TYPE_FLOAT, &sh4r.fr[1][2]},
97 {"XF4", REG_TYPE_FLOAT, &sh4r.fr[1][5] },{"XF5", REG_TYPE_FLOAT, &sh4r.fr[1][4]},
98 {"XF6", REG_TYPE_FLOAT, &sh4r.fr[1][7] },{"XF7", REG_TYPE_FLOAT, &sh4r.fr[1][6]},
99 {"XF8", REG_TYPE_FLOAT, &sh4r.fr[1][9] },{"XF9", REG_TYPE_FLOAT, &sh4r.fr[1][8]},
100 {"XF10", REG_TYPE_FLOAT, &sh4r.fr[1][11] },{"XF11", REG_TYPE_FLOAT, &sh4r.fr[1][10]},
101 {"XF12", REG_TYPE_FLOAT, &sh4r.fr[1][13] },{"XF13", REG_TYPE_FLOAT, &sh4r.fr[1][12]},
102 {"XF14", REG_TYPE_FLOAT, &sh4r.fr[1][15] },{"XF15", REG_TYPE_FLOAT, &sh4r.fr[1][14]},
106 void *sh4_get_register( int reg )
108 if( reg < 0 || reg >= 94 ) {
110 } else if( reg < 43 ) {
111 return sh4_reg_map[reg].value;
112 } else if( reg < 51 ) {
114 if( (sh4r.sr & SR_MDRB) == SR_MDRB ) {
115 /* bank 1 is primary */
116 return &sh4r.r_bank[reg-43];
118 return &sh4r.r[reg-43];
120 } else if( reg < 59 ) {
122 if( (sh4r.sr & SR_MDRB) == SR_MDRB ) {
123 /* bank 1 is primary */
124 return &sh4r.r[reg-43];
126 return &sh4r.r_bank[reg-43];
129 return NULL; /* not supported at the moment */
/* CPU descriptor handed to the generic debugger layer: bundles the SH4
 * disassembler, register accessors, debug memory accessors and
 * breakpoint hooks with the register map above.
 * NOTE(review): the tail of this initializer is not visible in this
 * extract; the literal constants (2, 23, 59) presumably encode the
 * instruction size and register-map partition points — confirm against
 * struct cpu_desc_struct. */
const struct cpu_desc_struct sh4_cpu_desc =
    { "SH4", sh4_disasm_instruction, sh4_get_register, sh4_has_page,
      sh4_debug_read_phys, sh4_debug_write_phys, sh4_debug_read_vma, sh4_debug_write_vma,
      sh4_execute_instruction,
      sh4_set_breakpoint, sh4_clear_breakpoint, sh4_get_breakpoint, 2,
      (char *)&sh4r, sizeof(sh4r), sh4_reg_map, 23, 59,
/* Dreamcast module descriptor: plugs the SH4 into the system-wide
 * init / power-on-reset / start / run-slice / stop / save / load
 * lifecycle driven by the dreamcast core. */
struct dreamcast_module sh4_module = { "SH4", sh4_init, sh4_poweron_reset,
        sh4_start, sh4_run_slice, sh4_stop,
        sh4_save_state, sh4_load_state };
/* The master SH4 register file. 16-byte aligned — presumably so
 * translated code can use aligned SSE-style accesses; confirm against
 * the translator backends. */
struct sh4_registers sh4r __attribute__((aligned(16)));
/* Active breakpoint table and count (managed by sh4_set_breakpoint /
 * sh4_clear_breakpoint below). */
struct breakpoint_struct sh4_breakpoints[MAX_BREAKPOINTS];
int sh4_breakpoint_count = 0;
/* Cleared by sh4_run_slice once a slice begins executing. */
gboolean sh4_starting = FALSE;
/* TRUE when running via the binary translator rather than the
 * interpreter (see sh4_set_core). */
static gboolean sh4_use_translator = FALSE;
/* longjmp target used by sh4_core_exit for sudden exits out of the
 * executing core back into sh4_run_slice. */
static jmp_buf sh4_exit_jmp_buf;
static gboolean sh4_running = FALSE;
/* Cached icache lookup state, invalidated with -1 sentinels. */
struct sh4_icache_struct sh4_icache = { NULL, -1, -1, 0 };
/* At the moment this is a dummy event to mark the end of the
 * timeslice (registered for EVENT_ENDTIMESLICE in sh4_init). The
 * event machinery itself forces the core out of its run loop; the
 * handler has nothing to do. */
void sh4_dummy_event(int eventid)
{
    /* Intentionally empty */
}
/**
 * Select the execution core: interpreter, translator, or (when built)
 * the shadow/verification mode. Falls back to the interpreter when the
 * translator was not compiled in.
 * NOTE(review): the #ifdef structure is incomplete in this extract —
 * the else-branch / closing lines are not visible.
 */
void sh4_set_core( sh4core_t core )
    // No-op if the translator was not built
#ifdef SH4_TRANSLATOR
    if( core != SH4_INTERPRET ) {
        /* Any translated mode requires the translator initialized */
        sh4_translate_init();
        sh4_use_translator = TRUE;
        if( core == SH4_SHADOW ) {
    /* Interpreter-only path */
    sh4_use_translator = FALSE;
179 gboolean sh4_translate_is_enabled()
181 return sh4_use_translator;
186 register_io_regions( mmio_list_sh4mmio );
187 register_event_callback( EVENT_ENDTIMESLICE, sh4_dummy_event );
192 #ifdef ENABLE_SH4STATS
/**
 * Power-on reset: zero the entire register file, then apply the SH4
 * architectural reset values (PC at the boot vector 0xA0000000, VBR 0,
 * FPSCR 0x00040001, SR 0x700000F0) and record the reset cause in
 * EXPEVT.
 * NOTE(review): translator-cache flush and peripheral resets are not
 * visible in this extract.
 */
void sh4_poweron_reset(void)
    /* zero everything out, for the sake of having a consistent state. */
    memset( &sh4r, 0, sizeof(sh4r) );
    if( sh4_use_translator ) {

    /* Resume running if we were halted */
    sh4r.sh4_state = SH4_STATE_RUNNING;

    /* Architectural reset values */
    sh4r.pc = 0xA0000000;
    sh4r.new_pc= 0xA0000002;
    sh4r.vbr = 0x00000000;
    sh4r.fpscr = 0x00040001;
    /* Go through sh4_write_sr so the derived t/s/m/q flags and bank
     * selection stay consistent with SR. */
    sh4_write_sr(0x700000F0);

    /* Mem reset will do this, but if we want to reset _just_ the SH4... */
    MMIO_WRITE( MMU, EXPEVT, EXC_POWER_RESET );

    /* Peripheral modules */
234 if( sh4_use_translator ) {
235 /* If we were running with the translator, update new_pc and in_delay_slot */
236 sh4r.new_pc = sh4r.pc+2;
237 sh4r.in_delay_slot = FALSE;
238 if( sh4_translate_get_profile_blocks() ) {
239 sh4_translate_dump_cache_by_activity(30);
/**
 * Execute a timeslice using translated code only (ie translate/execute loop)
 * Runs the SH4 for up to nanosecs ns, then brings the peripheral
 * modules (TMU/SCIF/PMM) up to the same point in time.
 * @return the number of nanoseconds actually executed
 *         (sh4r.slice_cycle).
 * NOTE(review): several case bodies of the setjmp switch are not
 * visible in this extract.
 */
uint32_t sh4_run_slice( uint32_t nanosecs )
    sh4r.slice_cycle = 0;

    /* Setup for sudden vm exits - sh4_core_exit() longjmps back here
     * with the exit code. */
    switch( setjmp(sh4_exit_jmp_buf) ) {
    case CORE_EXIT_BREAKPOINT:
        /* Drop any one-shot breakpoint at the stop address before
         * returning to the caller. */
        sh4_clear_breakpoint( sh4r.pc, BREAK_ONESHOT );
        /* Peripherals only advance while not in full standby */
        if( sh4r.sh4_state != SH4_STATE_STANDBY ) {
            TMU_run_slice( sh4r.slice_cycle );
            SCIF_run_slice( sh4r.slice_cycle );
            PMM_run_slice( sh4r.slice_cycle );
        return sh4r.slice_cycle;
    case CORE_EXIT_SYSRESET:
    case CORE_EXIT_SLEEP:
    case CORE_EXIT_FLUSH_ICACHE:

    /* If the CPU is asleep, consume the slice in sleep mode instead */
    if( sh4r.sh4_state != SH4_STATE_RUNNING ) {
        sh4_sleep_run_slice(nanosecs);

    /* Execute the core's real slice */
#ifdef SH4_TRANSLATOR
    if( sh4_use_translator ) {
        sh4_translate_run_slice(nanosecs);
        sh4_emulate_run_slice(nanosecs);
    sh4_emulate_run_slice(nanosecs);

    /* And finish off the peripherals afterwards */

    sh4_starting = FALSE;
    sh4r.slice_cycle = nanosecs;
    if( sh4r.sh4_state != SH4_STATE_STANDBY ) {
        TMU_run_slice( nanosecs );
        SCIF_run_slice( nanosecs );
        PMM_run_slice( sh4r.slice_cycle );
/**
 * Abort the current execution slice with the given exit code, by
 * longjmp-ing back into the setjmp in sh4_run_slice. When running
 * translated code, the translator state is first unwound so that
 * sh4r reflects a consistent instruction boundary.
 */
void sh4_core_exit( int exit_code )
#ifdef SH4_TRANSLATOR
    if( sh4_use_translator ) {
        if( exit_code == CORE_EXIT_EXCEPTION ) {
            /* Exceptions need the special recovery path (PC already
             * redirected to the handler). */
            sh4_translate_exception_exit_recover();
            sh4_translate_exit_recover();
        /* Finalize the in-flight instruction unless the exit reason
         * already left the instruction state consistent. */
        if( exit_code != CORE_EXIT_EXCEPTION &&
                exit_code != CORE_EXIT_BREAKPOINT ) {
            sh4_finalize_instruction();
    // longjmp back into sh4_run_slice

    longjmp(sh4_exit_jmp_buf, exit_code);
/**
 * Serialize the SH4 register file and peripheral module state to f.
 * Only the portion of sh4_registers up to xlat_sh4_mode is written —
 * the translator-derived fields are recomputed on load.
 * NOTE(review): additional peripheral save calls (MMU/TMU/...) appear
 * to be elided from this extract.
 */
void sh4_save_state( FILE *f )
    if( sh4_use_translator ) {
        /* If we were running with the translator, update new_pc and in_delay_slot */
        sh4r.new_pc = sh4r.pc+2;
        sh4r.in_delay_slot = FALSE;

    fwrite( &sh4r, offsetof(struct sh4_registers, xlat_sh4_mode), 1, f );

    INTC_save_state( f );
    SCIF_save_state( f );
/**
 * Restore SH4 register and peripheral state previously written by
 * sh4_save_state. The translator mode word is rebuilt from the loaded
 * SR/FPSCR rather than being read from the file.
 * @return 0 on success, presumably nonzero on failure (propagates the
 *         last peripheral loader's result — confirm against
 *         SCIF_load_state).
 */
int sh4_load_state( FILE * f )
    if( sh4_use_translator ) {

    fread( &sh4r, offsetof(struct sh4_registers, xlat_sh4_mode), 1, f );
    /* Recompute derived translator mode bits from SR/FPSCR */
    sh4r.xlat_sh4_mode = (sh4r.sr & SR_MD) | (sh4r.fpscr & (FPSCR_SZ|FPSCR_PR));

    INTC_load_state( f );

    return SCIF_load_state( f );
359 void sh4_set_breakpoint( uint32_t pc, breakpoint_type_t type )
361 sh4_breakpoints[sh4_breakpoint_count].address = pc;
362 sh4_breakpoints[sh4_breakpoint_count].type = type;
363 if( sh4_use_translator ) {
364 xlat_invalidate_word( pc );
366 sh4_breakpoint_count++;
369 gboolean sh4_clear_breakpoint( uint32_t pc, breakpoint_type_t type )
373 for( i=0; i<sh4_breakpoint_count; i++ ) {
374 if( sh4_breakpoints[i].address == pc &&
375 sh4_breakpoints[i].type == type ) {
376 while( ++i < sh4_breakpoint_count ) {
377 sh4_breakpoints[i-1].address = sh4_breakpoints[i].address;
378 sh4_breakpoints[i-1].type = sh4_breakpoints[i].type;
380 if( sh4_use_translator ) {
381 xlat_invalidate_word( pc );
383 sh4_breakpoint_count--;
390 int sh4_get_breakpoint( uint32_t pc )
393 for( i=0; i<sh4_breakpoint_count; i++ ) {
394 if( sh4_breakpoints[i].address == pc )
395 return sh4_breakpoints[i].type;
400 void sh4_set_pc( int pc )
406 void sh4_set_event_pending( uint32_t cycles )
408 sh4r.event_pending = cycles;
412 * Dump all SH4 core information for crash-dump purposes
416 cpu_print_registers( stderr, &sh4_cpu_desc );
417 #ifdef SH4_TRANSLATOR
418 if( sh4_use_translator ) {
419 sh4_translate_crashdump();
420 } /* Nothing really to print for emu core */
425 /******************************* Support methods ***************************/
427 static void sh4_switch_banks( )
431 memcpy( tmp, sh4r.r, sizeof(uint32_t)*8 );
432 memcpy( sh4r.r, sh4r.r_bank, sizeof(uint32_t)*8 );
433 memcpy( sh4r.r_bank, tmp, sizeof(uint32_t)*8 );
436 void FASTCALL sh4_switch_fr_banks()
439 for( i=0; i<16; i++ ) {
440 float tmp = sh4r.fr[0][i];
441 sh4r.fr[0][i] = sh4r.fr[1][i];
446 void FASTCALL sh4_write_sr( uint32_t newval )
448 int oldbank = (sh4r.sr&SR_MDRB) == SR_MDRB;
449 int newbank = (newval&SR_MDRB) == SR_MDRB;
450 if( oldbank != newbank )
452 sh4r.sr = newval & SR_MASK;
453 sh4r.t = (newval&SR_T) ? 1 : 0;
454 sh4r.s = (newval&SR_S) ? 1 : 0;
455 sh4r.m = (newval&SR_M) ? 1 : 0;
456 sh4r.q = (newval&SR_Q) ? 1 : 0;
457 sh4r.xlat_sh4_mode = (sh4r.sr & SR_MD) | (sh4r.fpscr & (FPSCR_SZ|FPSCR_PR));
461 void FASTCALL sh4_write_fpscr( uint32_t newval )
463 if( (sh4r.fpscr ^ newval) & FPSCR_FR ) {
464 sh4_switch_fr_banks();
466 sh4r.fpscr = newval & FPSCR_MASK;
467 sh4r.xlat_sh4_mode = (sh4r.sr & SR_MD) | (sh4r.fpscr & (FPSCR_SZ|FPSCR_PR));
470 uint32_t FASTCALL sh4_read_sr( void )
472 /* synchronize sh4r.sr with the various bitflags */
473 sh4r.sr &= SR_MQSTMASK;
474 if( sh4r.t ) sh4r.sr |= SR_T;
475 if( sh4r.s ) sh4r.sr |= SR_S;
476 if( sh4r.m ) sh4r.sr |= SR_M;
477 if( sh4r.q ) sh4r.sr |= SR_Q;
/**
 * Raise a CPU reset exception with the specified exception code:
 * record it in EXPEVT, clear VBR, jump to the boot vector 0xA0000000,
 * and set MD/BL/RB + full IMASK (clearing FD) in SR.
 */
void FASTCALL sh4_raise_reset( int code )
    MMIO_WRITE(MMU,EXPEVT,code);
    sh4r.vbr = 0x00000000;
    sh4r.pc = 0xA0000000;
    sh4r.new_pc = sh4r.pc + 2;
    sh4r.in_delay_slot = 0;
    sh4_write_sr( (sh4r.sr|SR_MD|SR_BL|SR_RB|SR_IMASK)&(~SR_FD) );

    /* Peripheral manual reset (FIXME: incomplete) */
499 void FASTCALL sh4_raise_tlb_multihit( sh4vma_t vpn )
501 MMIO_WRITE( MMU, TEA, vpn );
502 MMIO_WRITE( MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)) );
503 sh4_raise_reset( EXC_TLB_MULTI_HIT );
/**
 * Raise a general CPU exception for the specified exception code.
 * (NOT for TRAPA or TLB exceptions)
 * If exceptions are currently blocked (SR.BL) this escalates to a
 * manual reset; otherwise SR and R15 are saved to SSR/SGR, the code is
 * latched in EXPEVT, and control vectors to VBR+EXV_EXCEPTION in
 * privileged/blocked/bank-1 mode.
 * NOTE(review): the else-branch delimiters and the SPC save are not
 * visible in this extract.
 */
void FASTCALL sh4_raise_exception( int code )
    if( sh4r.sr & SR_BL ) {
        /* Exception while blocked => treated as a manual reset */
        sh4_raise_reset( EXC_MANUAL_RESET );
        sh4r.ssr = sh4_read_sr();
        sh4r.sgr = sh4r.r[15];
        MMIO_WRITE(MMU,EXPEVT, code);
        sh4r.pc = sh4r.vbr + EXV_EXCEPTION;
        sh4r.new_pc = sh4r.pc + 2;
        sh4_write_sr( sh4r.ssr |SR_MD|SR_BL|SR_RB );
        sh4r.in_delay_slot = 0;
/**
 * Raise a TRAPA exception: record the trap number (<<2) in TRA and
 * EXC_TRAP in EXPEVT, save SR/R15 into SSR/SGR, and vector to
 * VBR+EXV_EXCEPTION with MD/BL/RB set.
 * NOTE(review): the SPC save line is not visible in this extract.
 */
void FASTCALL sh4_raise_trap( int trap )
    MMIO_WRITE( MMU, TRA, trap<<2 );
    MMIO_WRITE( MMU, EXPEVT, EXC_TRAP );

    sh4r.ssr = sh4_read_sr();
    sh4r.sgr = sh4r.r[15];
    sh4r.pc = sh4r.vbr + EXV_EXCEPTION;
    sh4r.new_pc = sh4r.pc + 2;
    sh4_write_sr( sh4r.ssr |SR_MD|SR_BL|SR_RB );
    sh4r.in_delay_slot = 0;
/**
 * Raise a TLB miss exception for the given virtual address: latch the
 * address into TEA and the VPN field of PTEH, record the code in
 * EXPEVT, save SR/R15 into SSR/SGR, and vector to VBR+EXV_TLBMISS
 * (the dedicated TLB-miss vector) with MD/BL/RB set.
 */
void FASTCALL sh4_raise_tlb_exception( int code, sh4vma_t vpn )
    MMIO_WRITE( MMU, TEA, vpn );
    MMIO_WRITE( MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)) );
    MMIO_WRITE( MMU, EXPEVT, code );

    sh4r.ssr = sh4_read_sr();
    sh4r.sgr = sh4r.r[15];
    sh4r.pc = sh4r.vbr + EXV_TLBMISS;
    sh4r.new_pc = sh4r.pc + 2;
    sh4_write_sr( sh4r.ssr |SR_MD|SR_BL|SR_RB );
    sh4r.in_delay_slot = 0;
/**
 * Re-enter the exception handler at an explicit PC (used to replay an
 * exception whose cause registers are already set): saves SR/R15 into
 * SSR/SGR and jumps to exception_pc with MD/BL/RB set.
 */
void FASTCALL sh4_reraise_exception( sh4addr_t exception_pc )
    sh4r.ssr = sh4_read_sr();
    sh4r.sgr = sh4r.r[15];
    sh4r.pc = exception_pc;
    sh4r.new_pc = sh4r.pc + 2;
    sh4_write_sr( sh4r.ssr |SR_MD|SR_BL|SR_RB );
    sh4r.in_delay_slot = 0;
/**
 * Accept the highest-priority pending interrupt from the interrupt
 * controller: latch its code into INTEVT, save SR/R15 into SSR/SGR,
 * and vector to VBR+0x600 (the interrupt vector) with BL/MD/RB set.
 * NOTE(review): the SPC save line is not visible in this extract.
 */
void FASTCALL sh4_accept_interrupt( void )
    uint32_t code = intc_accept_interrupt();
    MMIO_WRITE( MMU, INTEVT, code );
    sh4r.ssr = sh4_read_sr();

    sh4r.sgr = sh4r.r[15];
    sh4_write_sr( sh4r.ssr|SR_BL|SR_MD|SR_RB );
    sh4r.pc = sh4r.vbr + 0x600;
    sh4r.new_pc = sh4r.pc + 2;
    sh4r.in_delay_slot = 0;
577 void FASTCALL signsat48( void )
579 if( ((int64_t)sh4r.mac) < (int64_t)0xFFFF800000000000LL )
580 sh4r.mac = 0xFFFF800000000000LL;
581 else if( ((int64_t)sh4r.mac) > (int64_t)0x00007FFFFFFFFFFFLL )
582 sh4r.mac = 0x00007FFFFFFFFFFFLL;
585 void FASTCALL sh4_fsca( uint32_t anglei, float *fr )
587 float angle = (((float)(anglei&0xFFFF))/65536.0) * 2 * M_PI;
/**
 * Enter sleep mode (eg by executing a SLEEP instruction).
 * Sets sh4_state appropriately and ensures any stopping peripheral modules
 * are brought up to the current time first. STBCR bit 7 selects full
 * standby; otherwise STBCR2 bit 7 selects deep sleep vs plain sleep —
 * presumably the STBY/DSLP bits, confirm against the SH7750 manual.
 * Always leaves the core via sh4_core_exit(CORE_EXIT_SLEEP).
 */
void FASTCALL sh4_sleep(void)
    if( MMIO_READ( CPG, STBCR ) & 0x80 ) {
        sh4r.sh4_state = SH4_STATE_STANDBY;
        /* Bring all running peripheral modules up to date, and then halt them. */
        TMU_run_slice( sh4r.slice_cycle );
        SCIF_run_slice( sh4r.slice_cycle );
        PMM_run_slice( sh4r.slice_cycle );
        if( MMIO_READ( CPG, STBCR2 ) & 0x80 ) {
            sh4r.sh4_state = SH4_STATE_DEEP_SLEEP;
            /* Halt DMAC but other peripherals still running */
            sh4r.sh4_state = SH4_STATE_SLEEP;
    sh4_core_exit( CORE_EXIT_SLEEP );
/**
 * Wakeup following sleep mode (IRQ or reset). Sets state back to running,
 * and restarts any peripheral devices that were stopped.
 * NOTE(review): the per-state restart bodies are not visible in this
 * extract.
 */
void sh4_wakeup(void)
    switch( sh4r.sh4_state ) {
    case SH4_STATE_STANDBY:
    case SH4_STATE_DEEP_SLEEP:
    case SH4_STATE_SLEEP:
    sh4r.sh4_state = SH4_STATE_RUNNING;
/**
 * Run a time slice (or portion of a timeslice) while the SH4 is sleeping.
 * Returns when either the SH4 wakes up (interrupt received) or the end of
 * the slice is reached. Updates sh4.slice_cycle with the exit time and
 * returns the same value.
 * NOTE(review): the event/IRQ dispatch bodies inside the loop are not
 * visible in this extract.
 */
uint32_t sh4_sleep_run_slice( uint32_t nanosecs )
    assert( sh4r.sh4_state != SH4_STATE_RUNNING );

    /* Fast-forward from event to event until an IRQ wakes us or the
     * slice is exhausted. */
    while( sh4r.event_pending < nanosecs ) {
        sh4r.slice_cycle = sh4r.event_pending;
        if( sh4r.event_types & PENDING_EVENT ) {
        if( sh4r.event_types & PENDING_IRQ ) {
            /* IRQ received => wake up, report the time we stopped at */
            return sh4r.slice_cycle;

    /* No wakeup - consume the remainder of the slice asleep */
    if( sh4r.slice_cycle < nanosecs )
        sh4r.slice_cycle = nanosecs;
    return sh4r.slice_cycle;
661 * Compute the matrix tranform of fv given the matrix xf.
662 * Both fv and xf are word-swapped as per the sh4r.fr banks
664 void FASTCALL sh4_ftrv( float *target )
666 float fv[4] = { target[1], target[0], target[3], target[2] };
667 target[1] = sh4r.fr[1][1] * fv[0] + sh4r.fr[1][5]*fv[1] +
668 sh4r.fr[1][9]*fv[2] + sh4r.fr[1][13]*fv[3];
669 target[0] = sh4r.fr[1][0] * fv[0] + sh4r.fr[1][4]*fv[1] +
670 sh4r.fr[1][8]*fv[2] + sh4r.fr[1][12]*fv[3];
671 target[3] = sh4r.fr[1][3] * fv[0] + sh4r.fr[1][7]*fv[1] +
672 sh4r.fr[1][11]*fv[2] + sh4r.fr[1][15]*fv[3];
673 target[2] = sh4r.fr[1][2] * fv[0] + sh4r.fr[1][6]*fv[1] +
674 sh4r.fr[1][10]*fv[2] + sh4r.fr[1][14]*fv[3];
677 gboolean sh4_has_page( sh4vma_t vma )
679 sh4addr_t addr = mmu_vma_to_phys_disasm(vma);
680 return addr != MMU_VMA_ERROR && mem_has_page(addr);
683 void sh4_handle_pending_events() {
684 if( sh4r.event_types & PENDING_EVENT ) {
687 /* Eventq execute may (quite likely) deliver an immediate IRQ */
688 if( sh4r.event_types & PENDING_IRQ ) {
689 sh4_accept_interrupt();
/**
 * Debugger physical-memory read: copy length bytes at physical addr
 * into buf, or zero-fill buf when the address maps to no memory
 * region.
 * NOTE(review): copies straight from the start region without walking
 * page boundaries — a read spanning regions reads past the first
 * region's mapping; confirm callers keep length within one page.
 * @return the number of bytes "read" (always length).
 */
size_t sh4_debug_read_phys( unsigned char *buf, uint32_t addr, size_t length )
{
    /* Quick and very dirty */
    unsigned char *region = mem_get_region(addr);
    if( region == NULL ) {
        memset( buf, 0, length );
    } else {
        memcpy( buf, region, length );
    }
    return length;
}
/**
 * Debugger physical-memory write: copy length bytes from buf to the
 * memory region backing physical addr; writes to unmapped addresses
 * are ignored.
 * NOTE(review): the return statement(s) are not visible in this
 * extract — presumably returns the byte count written.
 */
size_t sh4_debug_write_phys( uint32_t addr, unsigned char *buf, size_t length )
    unsigned char *region = mem_get_region(addr);
    if( region != NULL ) {
        memcpy( region, buf, length );
/**
 * Read virtual memory - for now just go 1K at a time, translating each
 * chunk through the disassembly MMU path. When the TLB is disabled the
 * VMA is used as the physical address directly.
 * NOTE(review): the loop's tail-advancement and error-return lines are
 * not visible in this extract.
 */
size_t sh4_debug_read_vma( unsigned char *buf, uint32_t addr, size_t length )
    if( IS_TLB_ENABLED() ) {
        while( length > 0 ) {
            sh4addr_t phys = mmu_vma_to_phys_disasm(addr);
            if( phys == MMU_VMA_ERROR )
            /* Clamp each chunk to the current 1K page */
            int next_len = 1024 - (phys&0x000003FF);
            if( next_len >= length ) {
                /* Final (possibly partial) chunk */
                sh4_debug_read_phys( buf, phys, length );
            read_len += next_len;

    /* TLB off: VMA == PA for debug purposes */
    return sh4_debug_read_phys( buf, addr, length );
/**
 * Write virtual memory, 1K page at a time, translating each chunk
 * through the disassembly MMU path (mirror of sh4_debug_read_vma).
 * When the TLB is disabled the VMA is used as the physical address.
 * NOTE(review): the loop's tail-advancement and error-return lines are
 * not visible in this extract.
 */
size_t sh4_debug_write_vma( uint32_t addr, unsigned char *buf, size_t length )
    if( IS_TLB_ENABLED() ) {
        while( length > 0 ) {
            sh4addr_t phys = mmu_vma_to_phys_disasm(addr);
            if( phys == MMU_VMA_ERROR )
            /* Clamp each chunk to the current 1K page */
            int next_len = 1024 - (phys&0x000003FF);
            if( next_len >= length ) {
                /* Final (possibly partial) chunk */
                sh4_debug_write_phys( phys, buf, length );
            read_len += next_len;

    /* TLB off: VMA == PA for debug purposes */
    return sh4_debug_write_phys( addr, buf, length );
.