4 * SH4 parent module for all CPU modes and SH4 peripheral
7 * Copyright (c) 2005 Nathan Keynes.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
20 #define MODULE sh4_module
25 #include "dreamcast.h"
33 #include "sh4/sh4core.h"
34 #include "sh4/sh4dasm.h"
35 #include "sh4/sh4mmio.h"
36 #include "sh4/sh4stat.h"
37 #include "sh4/sh4trans.h"
38 #include "xlat/xltcache.h"
/* Fallback definition: the C standard does not guarantee M_PI in <math.h>.
 * Guard it so we don't clash with toolchains/feature-test macros that do
 * provide it. */
#ifndef M_PI
#define M_PI 3.14159265358979323846264338327950288
#endif
/* Forward declarations for this module's entry points (wired into
 * sh4_module below) and the debugger memory-access hooks. */
44 void sh4_init( void );
45 void sh4_poweron_reset( void );
46 void sh4_start( void );
47 void sh4_stop( void );
48 void sh4_save_state( FILE *f );
49 int sh4_load_state( FILE *f );
/* Debugger raw-memory access by physical or virtual address; each returns
 * a size_t count (presumably bytes handled - see definitions at the end
 * of the file). */
50 size_t sh4_debug_read_phys( unsigned char *buf, uint32_t addr, size_t length );
51 size_t sh4_debug_write_phys( uint32_t addr, unsigned char *buf, size_t length );
52 size_t sh4_debug_read_vma( unsigned char *buf, uint32_t addr, size_t length );
53 size_t sh4_debug_write_vma( uint32_t addr, unsigned char *buf, size_t length );
/* Execute one timeslice of the given length; returns the time actually
 * consumed (see sh4_run_slice below, parameter is "nanosecs"). */
55 uint32_t sh4_run_slice( uint32_t );
57 /* Note: this must match GDB's ordering */
/* Register name/type/address table used by the debugger (referenced from
 * sh4_cpu_desc below); entries point directly into the global sh4r file.
 * NOTE(review): the closing of this initializer is elided in this listing. */
58 const struct reg_desc_struct sh4_reg_map[] =
59 { {"R0", REG_TYPE_INT, &sh4r.r[0]}, {"R1", REG_TYPE_INT, &sh4r.r[1]},
60 {"R2", REG_TYPE_INT, &sh4r.r[2]}, {"R3", REG_TYPE_INT, &sh4r.r[3]},
61 {"R4", REG_TYPE_INT, &sh4r.r[4]}, {"R5", REG_TYPE_INT, &sh4r.r[5]},
62 {"R6", REG_TYPE_INT, &sh4r.r[6]}, {"R7", REG_TYPE_INT, &sh4r.r[7]},
63 {"R8", REG_TYPE_INT, &sh4r.r[8]}, {"R9", REG_TYPE_INT, &sh4r.r[9]},
64 {"R10",REG_TYPE_INT, &sh4r.r[10]}, {"R11",REG_TYPE_INT, &sh4r.r[11]},
65 {"R12",REG_TYPE_INT, &sh4r.r[12]}, {"R13",REG_TYPE_INT, &sh4r.r[13]},
66 {"R14",REG_TYPE_INT, &sh4r.r[14]}, {"R15",REG_TYPE_INT, &sh4r.r[15]},
67 {"PC", REG_TYPE_INT, &sh4r.pc}, {"PR", REG_TYPE_INT, &sh4r.pr},
68 {"GBR", REG_TYPE_INT, &sh4r.gbr}, {"VBR",REG_TYPE_INT, &sh4r.vbr},
/* MACH/MACL are the two 32-bit halves of the 64-bit sh4r.mac accumulator.
 * NOTE(review): "+1" selects the high half only on a little-endian host. */
69 {"MACH",REG_TYPE_INT, ((uint32_t *)&sh4r.mac)+1}, {"MACL",REG_TYPE_INT, &sh4r.mac},
70 {"SR", REG_TYPE_INT, &sh4r.sr},
71 {"FPUL", REG_TYPE_INT, &sh4r.fpul.i}, {"FPSCR", REG_TYPE_INT, &sh4r.fpscr},
/* Float registers are stored word-swapped within each pair in sh4r.fr,
 * hence FR0 -> fr[0][1], FR1 -> fr[0][0], and so on (compare sh4_ftrv). */
73 {"FR0", REG_TYPE_FLOAT, &sh4r.fr[0][1] },{"FR1", REG_TYPE_FLOAT, &sh4r.fr[0][0]},
74 {"FR2", REG_TYPE_FLOAT, &sh4r.fr[0][3] },{"FR3", REG_TYPE_FLOAT, &sh4r.fr[0][2]},
75 {"FR4", REG_TYPE_FLOAT, &sh4r.fr[0][5] },{"FR5", REG_TYPE_FLOAT, &sh4r.fr[0][4]},
76 {"FR6", REG_TYPE_FLOAT, &sh4r.fr[0][7] },{"FR7", REG_TYPE_FLOAT, &sh4r.fr[0][6]},
77 {"FR8", REG_TYPE_FLOAT, &sh4r.fr[0][9] },{"FR9", REG_TYPE_FLOAT, &sh4r.fr[0][8]},
78 {"FR10", REG_TYPE_FLOAT, &sh4r.fr[0][11] },{"FR11", REG_TYPE_FLOAT, &sh4r.fr[0][10]},
79 {"FR12", REG_TYPE_FLOAT, &sh4r.fr[0][13] },{"FR13", REG_TYPE_FLOAT, &sh4r.fr[0][12]},
80 {"FR14", REG_TYPE_FLOAT, &sh4r.fr[0][15] },{"FR15", REG_TYPE_FLOAT, &sh4r.fr[0][14]},
82 {"SSR",REG_TYPE_INT, &sh4r.ssr}, {"SPC", REG_TYPE_INT, &sh4r.spc},
/* The banked R0-R7 copies have no fixed address: which storage holds them
 * depends on the current SR bank bit, so they are resolved dynamically by
 * sh4_get_register() (hence NULL here). */
84 {"R0B0", REG_TYPE_INT, NULL}, {"R1B0", REG_TYPE_INT, NULL},
85 {"R2B0", REG_TYPE_INT, NULL}, {"R3B0", REG_TYPE_INT, NULL},
86 {"R4B0", REG_TYPE_INT, NULL}, {"R5B0", REG_TYPE_INT, NULL},
87 {"R6B0", REG_TYPE_INT, NULL}, {"R7B0", REG_TYPE_INT, NULL},
88 {"R0B1", REG_TYPE_INT, NULL}, {"R1B1", REG_TYPE_INT, NULL},
89 {"R2B1", REG_TYPE_INT, NULL}, {"R3B1", REG_TYPE_INT, NULL},
90 {"R4B1", REG_TYPE_INT, NULL}, {"R5B1", REG_TYPE_INT, NULL},
91 {"R6B1", REG_TYPE_INT, NULL}, {"R7B1", REG_TYPE_INT, NULL},
93 {"SGR",REG_TYPE_INT, &sh4r.sgr}, {"DBR", REG_TYPE_INT, &sh4r.dbr},
/* XF0-XF15: the second floating-point bank (sh4r.fr[1]), same word-swap. */
95 {"XF0", REG_TYPE_FLOAT, &sh4r.fr[1][1] },{"XF1", REG_TYPE_FLOAT, &sh4r.fr[1][0]},
96 {"XF2", REG_TYPE_FLOAT, &sh4r.fr[1][3] },{"XF3", REG_TYPE_FLOAT, &sh4r.fr[1][2]},
97 {"XF4", REG_TYPE_FLOAT, &sh4r.fr[1][5] },{"XF5", REG_TYPE_FLOAT, &sh4r.fr[1][4]},
98 {"XF6", REG_TYPE_FLOAT, &sh4r.fr[1][7] },{"XF7", REG_TYPE_FLOAT, &sh4r.fr[1][6]},
99 {"XF8", REG_TYPE_FLOAT, &sh4r.fr[1][9] },{"XF9", REG_TYPE_FLOAT, &sh4r.fr[1][8]},
100 {"XF10", REG_TYPE_FLOAT, &sh4r.fr[1][11] },{"XF11", REG_TYPE_FLOAT, &sh4r.fr[1][10]},
101 {"XF12", REG_TYPE_FLOAT, &sh4r.fr[1][13] },{"XF13", REG_TYPE_FLOAT, &sh4r.fr[1][12]},
102 {"XF14", REG_TYPE_FLOAT, &sh4r.fr[1][15] },{"XF15", REG_TYPE_FLOAT, &sh4r.fr[1][14]},
106 void *sh4_get_register( int reg )
108 if( reg < 0 || reg >= 94 ) {
110 } else if( reg < 43 ) {
111 return sh4_reg_map[reg].value;
112 } else if( reg < 51 ) {
114 if( (sh4r.sr & SR_MDRB) == SR_MDRB ) {
115 /* bank 1 is primary */
116 return &sh4r.r_bank[reg-43];
118 return &sh4r.r[reg-43];
120 } else if( reg < 59 ) {
122 if( (sh4r.sr & SR_MDRB) == SR_MDRB ) {
123 /* bank 1 is primary */
124 return &sh4r.r[reg-43];
126 return &sh4r.r_bank[reg-43];
129 return NULL; /* not supported at the moment */
/* CPU descriptor handed to the generic debugger front-end: disassembly,
 * register access, memory access and breakpoint callbacks, plus the raw
 * register file. NOTE(review): the trailing integers (2, 23, 59) are
 * positional fields - presumably instruction size, PC register index and
 * register count - confirm against cpu_desc_struct. The close of this
 * initializer is elided from this listing. */
134 const struct cpu_desc_struct sh4_cpu_desc =
135 { "SH4", sh4_disasm_instruction, sh4_get_register, sh4_has_page,
136 sh4_debug_read_phys, sh4_debug_write_phys, sh4_debug_read_vma, sh4_debug_write_vma,
137 sh4_execute_instruction,
138 sh4_set_breakpoint, sh4_clear_breakpoint, sh4_get_breakpoint, 2,
139 (char *)&sh4r, sizeof(sh4r), sh4_reg_map, 23, 59,
/* Module descriptor registered with the dreamcast core: lifecycle
 * callbacks for init/reset/start/run/stop plus state save/load. */
142 struct dreamcast_module sh4_module = { "SH4", sh4_init, sh4_poweron_reset,
143 sh4_start, sh4_run_slice, sh4_stop,
144 sh4_save_state, sh4_load_state };
/* The complete SH4 register file. 16-byte aligned - presumably so
 * generated/translated code can use aligned vector loads; confirm. */
146 struct sh4_registers sh4r __attribute__((aligned(16)));
/* Active debugger breakpoints; see sh4_set/clear/get_breakpoint below.
 * NOTE(review): no overflow check against MAX_BREAKPOINTS is visible in
 * sh4_set_breakpoint. */
147 struct breakpoint_struct sh4_breakpoints[MAX_BREAKPOINTS];
148 int sh4_breakpoint_count = 0;
150 gboolean sh4_starting = FALSE;
151 gboolean sh4_profile_blocks = FALSE;
/* TRUE when the binary-translator core is selected (see sh4_set_core). */
152 static gboolean sh4_use_translator = FALSE;
/* Re-entry point for sudden VM exits: armed by setjmp in sh4_run_slice,
 * taken by longjmp in sh4_core_exit. */
153 static jmp_buf sh4_exit_jmp_buf;
154 static gboolean sh4_running = FALSE;
155 struct sh4_icache_struct sh4_icache = { NULL, -1, -1, 0 };
157 /* At the moment this is a dummy event to mark the end of the
/* (remainder of this comment and the function body are elided from this
 * listing; registered for EVENT_ENDTIMESLICE in sh4_init below.) */
160 void sh4_dummy_event(int eventid)
/* Select the execution core (interpreter vs. translator). When built with
 * the translator, any non-interpreter core initializes translation and
 * enables it; the SH4_SHADOW handling and the interpreter fallback path
 * are elided from this listing. */
164 void sh4_set_core( sh4core_t core )
166 // No-op if the translator was not built
167 #ifdef SH4_TRANSLATOR
168 if( core != SH4_INTERPRET ) {
169 sh4_translate_init();
170 sh4_use_translator = TRUE;
171 if( core == SH4_SHADOW ) {
175 sh4_use_translator = FALSE;
/* Query whether the translator core is currently selected. */
180 gboolean sh4_translate_is_enabled()
182 return sh4_use_translator;
/* Fragment of sh4_init (signature elided from this listing): register the
 * SH4 on-chip MMIO regions and the end-of-timeslice event callback. */
187 register_io_regions( mmio_list_sh4mmio );
188 register_event_callback( EVENT_ENDTIMESLICE, sh4_dummy_event );
193 #ifdef ENABLE_SH4STATS
/* Hard (power-on) reset: clear the entire register file, flush translator
 * state if in use (elided), then load the architectural reset values. */
203 void sh4_poweron_reset(void)
205 /* zero everything out, for the sake of having a consistent state. */
206 memset( &sh4r, 0, sizeof(sh4r) );
207 if( sh4_use_translator ) {
211 /* Resume running if we were halted */
212 sh4r.sh4_state = SH4_STATE_RUNNING;
/* Architectural reset state: execution restarts at the 0xA0000000 vector. */
214 sh4r.pc = 0xA0000000;
215 sh4r.new_pc= 0xA0000002;
216 sh4r.vbr = 0x00000000;
217 sh4r.fpscr = 0x00040001;
/* Go through sh4_write_sr so the derived flag fields stay in sync. */
218 sh4_write_sr(0x700000F0);
220 /* Mem reset will do this, but if we want to reset _just_ the SH4... */
221 MMIO_WRITE( MMU, EXPEVT, EXC_POWER_RESET );
223 /* Peripheral modules */
/* Fragment of sh4_stop (signature elided): when leaving the translator,
 * resynchronize the interpreter-visible state (new_pc, delay-slot flag),
 * and optionally dump the hottest translated blocks when profiling. */
235 if( sh4_use_translator ) {
236 /* If we were running with the translator, update new_pc and in_delay_slot */
237 sh4r.new_pc = sh4r.pc+2;
238 sh4r.in_delay_slot = FALSE;
239 #ifdef SH4_TRANSLATOR
240 if( sh4_profile_blocks ) {
241 sh4_translate_dump_cache_by_activity(30);
248 * Execute a timeslice using translated code only (ie translate/execute loop)
/* Run one timeslice of nanosecs. The setjmp below is the re-entry point
 * for sudden exits (breakpoint, sysreset, sleep, icache flush) taken via
 * sh4_core_exit()/longjmp. Returns the time actually consumed.
 * NOTE(review): several case bodies and the closing lines are elided from
 * this listing. */
250 uint32_t sh4_run_slice( uint32_t nanosecs )
252 sh4r.slice_cycle = 0;
254 /* Setup for sudden vm exits */
255 switch( setjmp(sh4_exit_jmp_buf) ) {
256 case CORE_EXIT_BREAKPOINT:
/* One-shot breakpoints clear themselves when hit. */
257 sh4_clear_breakpoint( sh4r.pc, BREAK_ONESHOT );
260 if( sh4r.sh4_state != SH4_STATE_STANDBY ) {
/* Bring the peripheral modules up to the exit time before returning. */
261 TMU_run_slice( sh4r.slice_cycle );
262 SCIF_run_slice( sh4r.slice_cycle );
263 PMM_run_slice( sh4r.slice_cycle );
265 return sh4r.slice_cycle;
267 case CORE_EXIT_SYSRESET:
270 case CORE_EXIT_SLEEP:
272 case CORE_EXIT_FLUSH_ICACHE:
/* A sleeping/standby CPU idles through the slice waiting for events. */
277 if( sh4r.sh4_state != SH4_STATE_RUNNING ) {
278 sh4_sleep_run_slice(nanosecs);
282 /* Execute the core's real slice */
283 #ifdef SH4_TRANSLATOR
284 if( sh4_use_translator ) {
285 sh4_translate_run_slice(nanosecs);
287 sh4_emulate_run_slice(nanosecs);
290 sh4_emulate_run_slice(nanosecs);
294 /* And finish off the peripherals afterwards */
297 sh4_starting = FALSE;
298 sh4r.slice_cycle = nanosecs;
299 if( sh4r.sh4_state != SH4_STATE_STANDBY ) {
300 TMU_run_slice( nanosecs );
301 SCIF_run_slice( nanosecs );
302 PMM_run_slice( sh4r.slice_cycle );
/* Abort the current slice immediately with the given exit code, unwinding
 * back into the setjmp in sh4_run_slice. When running translated code the
 * translator must first recover a consistent interpreter state. */
307 void sh4_core_exit( int exit_code )
310 #ifdef SH4_TRANSLATOR
311 if( sh4_use_translator ) {
312 if( exit_code == CORE_EXIT_EXCEPTION ) {
313 sh4_translate_exception_exit_recover();
315 sh4_translate_exit_recover();
/* Exceptions/breakpoints leave PC at the faulting instruction; all other
 * exit reasons complete the current instruction first. */
319 if( exit_code != CORE_EXIT_EXCEPTION &&
320 exit_code != CORE_EXIT_BREAKPOINT ) {
321 sh4_finalize_instruction();
323 // longjmp back into sh4_run_slice
325 longjmp(sh4_exit_jmp_buf, exit_code);
/* Serialize the CPU register file and peripheral state to f. Only the
 * registers up to xlat_sh4_mode are saved (the rest is runtime-derived).
 * NOTE(review): fwrite's return value is ignored; additional sub-module
 * saves are elided from this listing. */
329 void sh4_save_state( FILE *f )
331 if( sh4_use_translator ) {
332 /* If we were running with the translator, update new_pc and in_delay_slot */
333 sh4r.new_pc = sh4r.pc+2;
334 sh4r.in_delay_slot = FALSE;
337 fwrite( &sh4r, offsetof(struct sh4_registers, xlat_sh4_mode), 1, f );
341 INTC_save_state( f );
343 SCIF_save_state( f );
/* Restore CPU and peripheral state from f; returns the last sub-module's
 * load status. NOTE(review): fread's return value is ignored, so a short
 * or corrupt save file is not detected here. */
346 int sh4_load_state( FILE * f )
348 if( sh4_use_translator ) {
351 fread( &sh4r, offsetof(struct sh4_registers, xlat_sh4_mode), 1, f );
/* xlat_sh4_mode is derived state: recompute it from SR/FPSCR after load. */
352 sh4r.xlat_sh4_mode = (sh4r.sr & SR_MD) | (sh4r.fpscr & (FPSCR_SZ|FPSCR_PR));
356 INTC_load_state( f );
358 return SCIF_load_state( f );
/* Register a breakpoint at pc. When translating, the translated word at pc
 * is invalidated so the translator will re-check for the breakpoint.
 * NOTE(review): no bounds check against MAX_BREAKPOINTS is visible -
 * confirm the caller guarantees the table cannot overflow. */
361 void sh4_set_breakpoint( uint32_t pc, breakpoint_type_t type )
363 sh4_breakpoints[sh4_breakpoint_count].address = pc;
364 sh4_breakpoints[sh4_breakpoint_count].type = type;
365 if( sh4_use_translator ) {
366 xlat_invalidate_word( pc );
368 sh4_breakpoint_count++;
/* Remove the first breakpoint matching (pc, type), compacting the table.
 * The return statements (presumably TRUE on removal, FALSE when no match)
 * are elided from this listing. */
371 gboolean sh4_clear_breakpoint( uint32_t pc, breakpoint_type_t type )
375 for( i=0; i<sh4_breakpoint_count; i++ ) {
376 if( sh4_breakpoints[i].address == pc &&
377 sh4_breakpoints[i].type == type ) {
/* Shift the remaining entries down over the removed slot. */
378 while( ++i < sh4_breakpoint_count ) {
379 sh4_breakpoints[i-1].address = sh4_breakpoints[i].address;
380 sh4_breakpoints[i-1].type = sh4_breakpoints[i].type;
382 if( sh4_use_translator ) {
383 xlat_invalidate_word( pc );
385 sh4_breakpoint_count--;
/* Return the type of the breakpoint set at pc; the fallthrough return for
 * "no breakpoint" (presumably 0) is elided from this listing. */
392 int sh4_get_breakpoint( uint32_t pc )
395 for( i=0; i<sh4_breakpoint_count; i++ ) {
396 if( sh4_breakpoints[i].address == pc )
397 return sh4_breakpoints[i].type;
/* Small accessors; some bodies are elided from this listing. */
402 void sh4_set_pc( int pc )
/* Record the time (slice cycles) at which the next event becomes due;
 * consumed by sh4_sleep_run_slice and the cores. */
408 void sh4_set_event_pending( uint32_t cycles )
410 sh4r.event_pending = cycles;
/* Enable/disable block-activity profiling (dumped in sh4_stop). */
413 void sh4_set_profile_blocks( gboolean flag )
415 sh4_profile_blocks = flag;
418 gboolean sh4_get_profile_blocks( )
420 return sh4_profile_blocks;
424 * Dump all SH4 core information for crash-dump purposes
/* (function signature elided from this listing) */
428 cpu_print_registers( stderr, &sh4_cpu_desc );
429 #ifdef SH4_TRANSLATOR
430 if( sh4_use_translator ) {
431 sh4_translate_crashdump();
432 } /* Nothing really to print for emu core */
437 /******************************* Support methods ***************************/
/* Swap the active R0-R7 registers with the inactive bank copies via a
 * temporary (tmp's declaration - presumably uint32_t tmp[8] - is elided
 * from this listing). Called from sh4_write_sr on a bank change. */
439 static void sh4_switch_banks( )
443 memcpy( tmp, sh4r.r, sizeof(uint32_t)*8 );
444 memcpy( sh4r.r, sh4r.r_bank, sizeof(uint32_t)*8 );
445 memcpy( sh4r.r_bank, tmp, sizeof(uint32_t)*8 );
/* Swap the two floating-point register banks element by element (the
 * write-back of tmp into fr[1][i] is elided from this listing). Called
 * from sh4_write_fpscr when FPSCR.FR changes. */
448 void FASTCALL sh4_switch_fr_banks()
451 for( i=0; i<16; i++ ) {
452 float tmp = sh4r.fr[0][i];
453 sh4r.fr[0][i] = sh4r.fr[1][i];
/* Store a new SR value: switch GPR banks when the MD/RB selection changes
 * (the sh4_switch_banks() call under the if is elided from this listing),
 * mirror the T/S/M/Q bits into their dedicated fast-access fields, and
 * refresh the derived translator mode word. */
458 void FASTCALL sh4_write_sr( uint32_t newval )
460 int oldbank = (sh4r.sr&SR_MDRB) == SR_MDRB;
461 int newbank = (newval&SR_MDRB) == SR_MDRB;
462 if( oldbank != newbank )
464 sh4r.sr = newval & SR_MASK;
/* Cache the individual flag bits for quick access by the cores. */
465 sh4r.t = (newval&SR_T) ? 1 : 0;
466 sh4r.s = (newval&SR_S) ? 1 : 0;
467 sh4r.m = (newval&SR_M) ? 1 : 0;
468 sh4r.q = (newval&SR_Q) ? 1 : 0;
/* Derived mode word used by the translator (also rebuilt in sh4_load_state). */
469 sh4r.xlat_sh4_mode = (sh4r.sr & SR_MD) | (sh4r.fpscr & (FPSCR_SZ|FPSCR_PR));
/* Store a new FPSCR value, swapping the FP register banks when the FR bit
 * changes, and refresh the derived translator mode word. */
473 void FASTCALL sh4_write_fpscr( uint32_t newval )
475 if( (sh4r.fpscr ^ newval) & FPSCR_FR ) {
476 sh4_switch_fr_banks();
478 sh4r.fpscr = newval & FPSCR_MASK;
479 sh4r.xlat_sh4_mode = (sh4r.sr & SR_MD) | (sh4r.fpscr & (FPSCR_SZ|FPSCR_PR));
/* Reassemble the architectural SR value from the cached T/S/M/Q flag
 * fields (the trailing "return sh4r.sr;" is elided from this listing). */
482 uint32_t FASTCALL sh4_read_sr( void )
484 /* synchronize sh4r.sr with the various bitflags */
485 sh4r.sr &= SR_MQSTMASK;
486 if( sh4r.t ) sh4r.sr |= SR_T;
487 if( sh4r.s ) sh4r.sr |= SR_S;
488 if( sh4r.m ) sh4r.sr |= SR_M;
489 if( sh4r.q ) sh4r.sr |= SR_Q;
494 * Raise a CPU reset exception with the specified exception code.
496 void FASTCALL sh4_raise_reset( int code )
498 MMIO_WRITE(MMU,EXPEVT,code);
/* Reset entry: VBR cleared, execution restarts at 0xA0000000 with
 * MD/BL/RB set, interrupts masked, and FD cleared (FPU enabled). */
499 sh4r.vbr = 0x00000000;
500 sh4r.pc = 0xA0000000;
501 sh4r.new_pc = sh4r.pc + 2;
502 sh4r.in_delay_slot = 0;
503 sh4_write_sr( (sh4r.sr|SR_MD|SR_BL|SR_RB|SR_IMASK)&(~SR_FD) );
505 /* Peripheral manual reset (FIXME: incomplete) */
/* A TLB multiple-hit is treated as a reset-class exception: record the
 * faulting address in TEA and the VPN field of PTEH, then take the reset
 * path with EXC_TLB_MULTI_HIT. */
511 void FASTCALL sh4_raise_tlb_multihit( sh4vma_t vpn )
513 MMIO_WRITE( MMU, TEA, vpn );
514 MMIO_WRITE( MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)) );
515 sh4_raise_reset( EXC_TLB_MULTI_HIT );
519 * Raise a general CPU exception for the specified exception code.
520 * (NOT for TRAPA or TLB exceptions)
522 void FASTCALL sh4_raise_exception( int code )
/* An exception raised while BL is set escalates to a manual reset. */
524 if( sh4r.sr & SR_BL ) {
525 sh4_raise_reset( EXC_MANUAL_RESET );
/* Standard exception entry: save SR and R15 into SSR/SGR, record the
 * code in EXPEVT, and vector through VBR with MD/BL/RB set. */
528 sh4r.ssr = sh4_read_sr();
529 sh4r.sgr = sh4r.r[15];
530 MMIO_WRITE(MMU,EXPEVT, code);
531 sh4r.pc = sh4r.vbr + EXV_EXCEPTION;
532 sh4r.new_pc = sh4r.pc + 2;
533 sh4_write_sr( sh4r.ssr |SR_MD|SR_BL|SR_RB );
534 sh4r.in_delay_slot = 0;
/* TRAPA: record trap<<2 in TRA and EXC_TRAP in EXPEVT, then perform the
 * standard exception-entry sequence (SSR/SGR save, vector via VBR). */
538 void FASTCALL sh4_raise_trap( int trap )
540 MMIO_WRITE( MMU, TRA, trap<<2 );
541 MMIO_WRITE( MMU, EXPEVT, EXC_TRAP );
543 sh4r.ssr = sh4_read_sr();
544 sh4r.sgr = sh4r.r[15];
545 sh4r.pc = sh4r.vbr + EXV_EXCEPTION;
546 sh4r.new_pc = sh4r.pc + 2;
547 sh4_write_sr( sh4r.ssr |SR_MD|SR_BL|SR_RB );
548 sh4r.in_delay_slot = 0;
/* TLB exception: record the faulting address in TEA and PTEH's VPN field,
 * the code in EXPEVT, then vector through the TLB-miss entry point. */
551 void FASTCALL sh4_raise_tlb_exception( int code, sh4vma_t vpn )
553 MMIO_WRITE( MMU, TEA, vpn );
554 MMIO_WRITE( MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)) );
555 MMIO_WRITE( MMU, EXPEVT, code );
557 sh4r.ssr = sh4_read_sr();
558 sh4r.sgr = sh4r.r[15];
559 sh4r.pc = sh4r.vbr + EXV_TLBMISS;
560 sh4r.new_pc = sh4r.pc + 2;
561 sh4_write_sr( sh4r.ssr |SR_MD|SR_BL|SR_RB );
562 sh4r.in_delay_slot = 0;
/* Re-enter exception handling at an explicit handler PC, performing the
 * usual SSR/SGR save and SR update, but without touching EXPEVT. */
565 void FASTCALL sh4_reraise_exception( sh4addr_t exception_pc )
568 sh4r.ssr = sh4_read_sr();
569 sh4r.sgr = sh4r.r[15];
570 sh4r.pc = exception_pc;
571 sh4r.new_pc = sh4r.pc + 2;
572 sh4_write_sr( sh4r.ssr |SR_MD|SR_BL|SR_RB );
573 sh4r.in_delay_slot = 0;
/* Accept the pending interrupt reported by the INTC: record its code in
 * INTEVT and vector to VBR+0x600 with BL/MD/RB set. */
576 void FASTCALL sh4_accept_interrupt( void )
578 uint32_t code = intc_accept_interrupt();
579 MMIO_WRITE( MMU, INTEVT, code );
580 sh4r.ssr = sh4_read_sr();
582 sh4r.sgr = sh4r.r[15];
583 sh4_write_sr( sh4r.ssr|SR_BL|SR_MD|SR_RB );
584 sh4r.pc = sh4r.vbr + 0x600;
585 sh4r.new_pc = sh4r.pc + 2;
586 sh4r.in_delay_slot = 0;
589 void FASTCALL signsat48( void )
591 if( ((int64_t)sh4r.mac) < (int64_t)0xFFFF800000000000LL )
592 sh4r.mac = 0xFFFF800000000000LL;
593 else if( ((int64_t)sh4r.mac) > (int64_t)0x00007FFFFFFFFFFFLL )
594 sh4r.mac = 0x00007FFFFFFFFFFFLL;
/* FSCA: convert a 16-bit angle fraction (anglei/65536 of a full turn) to
 * radians; the actual sin/cos stores into fr[] are elided from this
 * listing. */
597 void FASTCALL sh4_fsca( uint32_t anglei, float *fr )
599 float angle = (((float)(anglei&0xFFFF))/65536.0) * 2 * M_PI;
605 * Enter sleep mode (eg by executing a SLEEP instruction).
606 * Sets sh4_state appropriately and ensures any stopping peripheral modules
/* (remainder of the header comment is elided from this listing) */
609 void FASTCALL sh4_sleep(void)
/* STBCR bit 0x80 set (presumably the STBY bit): full standby - bring the
 * peripherals up to date, then halt them. */
611 if( MMIO_READ( CPG, STBCR ) & 0x80 ) {
612 sh4r.sh4_state = SH4_STATE_STANDBY;
613 /* Bring all running peripheral modules up to date, and then halt them. */
614 TMU_run_slice( sh4r.slice_cycle );
615 SCIF_run_slice( sh4r.slice_cycle );
616 PMM_run_slice( sh4r.slice_cycle );
618 if( MMIO_READ( CPG, STBCR2 ) & 0x80 ) {
619 sh4r.sh4_state = SH4_STATE_DEEP_SLEEP;
620 /* Halt DMAC but other peripherals still running */
623 sh4r.sh4_state = SH4_STATE_SLEEP;
/* Leave the slice immediately; sh4_run_slice idles until wakeup. */
626 sh4_core_exit( CORE_EXIT_SLEEP );
630 * Wakeup following sleep mode (IRQ or reset). Sets state back to running,
631 * and restarts any peripheral devices that were stopped.
633 void sh4_wakeup(void)
635 switch( sh4r.sh4_state ) {
/* Per-state restart actions are elided from this listing. */
636 case SH4_STATE_STANDBY:
638 case SH4_STATE_DEEP_SLEEP:
640 case SH4_STATE_SLEEP:
643 sh4r.sh4_state = SH4_STATE_RUNNING;
647 * Run a time slice (or portion of a timeslice) while the SH4 is sleeping.
648 * Returns when either the SH4 wakes up (interrupt received) or the end of
649 * the slice is reached. Updates sh4.slice_cycle with the exit time and
650 * returns the same value.
652 uint32_t sh4_sleep_run_slice( uint32_t nanosecs )
654 assert( sh4r.sh4_state != SH4_STATE_RUNNING );
/* Fast-forward from event to event until one wakes us or the slice ends;
 * event dispatch under PENDING_EVENT is elided from this listing. */
656 while( sh4r.event_pending < nanosecs ) {
657 sh4r.slice_cycle = sh4r.event_pending;
658 if( sh4r.event_types & PENDING_EVENT ) {
661 if( sh4r.event_types & PENDING_IRQ ) {
/* An IRQ ends the sleep: return at the time it was delivered. */
663 return sh4r.slice_cycle;
/* No wake-up before the end of the slice: consume the remainder. */
666 if( sh4r.slice_cycle < nanosecs )
667 sh4r.slice_cycle = nanosecs;
668 return sh4r.slice_cycle;
673 * Compute the matrix tranform of fv given the matrix xf.
674 * Both fv and xf are word-swapped as per the sh4r.fr banks
676 void FASTCALL sh4_ftrv( float *target )
678 float fv[4] = { target[1], target[0], target[3], target[2] };
679 target[1] = sh4r.fr[1][1] * fv[0] + sh4r.fr[1][5]*fv[1] +
680 sh4r.fr[1][9]*fv[2] + sh4r.fr[1][13]*fv[3];
681 target[0] = sh4r.fr[1][0] * fv[0] + sh4r.fr[1][4]*fv[1] +
682 sh4r.fr[1][8]*fv[2] + sh4r.fr[1][12]*fv[3];
683 target[3] = sh4r.fr[1][3] * fv[0] + sh4r.fr[1][7]*fv[1] +
684 sh4r.fr[1][11]*fv[2] + sh4r.fr[1][15]*fv[3];
685 target[2] = sh4r.fr[1][2] * fv[0] + sh4r.fr[1][6]*fv[1] +
686 sh4r.fr[1][10]*fv[2] + sh4r.fr[1][14]*fv[3];
689 gboolean sh4_has_page( sh4vma_t vma )
691 sh4addr_t addr = mmu_vma_to_phys_disasm(vma);
692 return addr != MMU_VMA_ERROR && mem_has_page(addr);
/* Dispatch any events that have come due, then deliver a pending IRQ if
 * one is now asserted (the event-queue execute call under the first if is
 * elided from this listing). */
695 void sh4_handle_pending_events() {
696 if( sh4r.event_types & PENDING_EVENT ) {
699 /* Eventq execute may (quite likely) deliver an immediate IRQ */
700 if( sh4r.event_types & PENDING_IRQ ) {
701 sh4_accept_interrupt();
706 * Go through ext_address_space page by page
/* Read `length` bytes of physical memory into buf for the debugger;
 * unmapped regions read as zeros. NOTE(review): no visible check that
 * `length` stays within the region returned by mem_get_region; the
 * return statements are elided from this listing. */
708 size_t sh4_debug_read_phys( unsigned char *buf, uint32_t addr, size_t length )
710 /* Quick and very dirty */
711 unsigned char *region = mem_get_region(addr);
712 if( region == NULL ) {
713 memset( buf, 0, length );
715 memcpy( buf, region, length );
/* Write `length` bytes from buf to physical memory; writes to unmapped
 * regions are silently dropped. NOTE(review): as with the read path, the
 * copy is not bounded to the region, and the return is elided here. */
720 size_t sh4_debug_write_phys( uint32_t addr, unsigned char *buf, size_t length )
722 unsigned char *region = mem_get_region(addr);
723 if( region != NULL ) {
724 memcpy( region, buf, length );
730 * Read virtual memory - for now just go 1K at a time
732 size_t sh4_debug_read_vma( unsigned char *buf, uint32_t addr, size_t length )
/* With the TLB disabled, delegate directly to the physical read. */
734 if( IS_TLB_ENABLED() ) {
736 while( length > 0 ) {
737 sh4addr_t phys = mmu_vma_to_phys_disasm(addr);
738 if( phys == MMU_VMA_ERROR )
/* Clamp each chunk to the remainder of the current 1KB page. */
740 int next_len = 1024 - (phys&0x000003FF);
/* NOTE(review): signed/unsigned comparison between int next_len and
 * size_t length - benign at these magnitudes, but worth confirming. */
741 if( next_len >= length ) {
744 sh4_debug_read_phys( buf, phys, length );
747 read_len += next_len;
752 return sh4_debug_read_phys( buf, addr, length );
/* Write virtual memory 1KB page at a time, mirroring sh4_debug_read_vma
 * above; with the TLB disabled, delegate directly to the physical write. */
756 size_t sh4_debug_write_vma( uint32_t addr, unsigned char *buf, size_t length )
758 if( IS_TLB_ENABLED() ) {
760 while( length > 0 ) {
761 sh4addr_t phys = mmu_vma_to_phys_disasm(addr);
762 if( phys == MMU_VMA_ERROR )
764 int next_len = 1024 - (phys&0x000003FF);
765 if( next_len >= length ) {
768 sh4_debug_write_phys( phys, buf, length );
/* NOTE(review): the accumulator is named read_len even on the write path. */
771 read_len += next_len;
776 return sh4_debug_write_phys( addr, buf, length );
.