4 * SH4 parent module for all CPU modes and SH4 peripheral
7 * Copyright (c) 2005 Nathan Keynes.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
20 #define MODULE sh4_module
25 #include "dreamcast.h"
32 #include "sh4/sh4core.h"
33 #include "sh4/sh4mmio.h"
34 #include "sh4/sh4stat.h"
35 #include "sh4/sh4trans.h"
36 #include "sh4/xltcache.h"
39 #define M_PI 3.14159265358979323846264338327950288
/* Forward declarations for the lifecycle callbacks wired into sh4_module below. */
42 void sh4_init( void );
43 void sh4_xlat_init( void );
44 void sh4_poweron_reset( void );
45 void sh4_start( void );
46 void sh4_stop( void );
47 void sh4_save_state( FILE *f );
48 int sh4_load_state( FILE *f );
/* Run one timeslice of the given length (nanoseconds); returns cycles consumed. */
50 uint32_t sh4_run_slice( uint32_t );
51 uint32_t sh4_xlat_run_slice( uint32_t );
/* Module descriptor registered with the dreamcast core.  The initializer order
 * suggests: name, init, reset, start, run_slice, stop, save_state, load_state
 * -- confirm field order against the dreamcast_module declaration. */
53 struct dreamcast_module sh4_module = { "SH4", sh4_init, sh4_poweron_reset,
54 sh4_start, sh4_run_slice, sh4_stop,
55 sh4_save_state, sh4_load_state };
/* Primary CPU register file; 16-byte aligned (presumably for SSE access from
 * the translator -- confirm). */
57 struct sh4_registers sh4r __attribute__((aligned(16)));
/* Fixed-size breakpoint table and its current occupancy. */
58 struct breakpoint_struct sh4_breakpoints[MAX_BREAKPOINTS];
59 int sh4_breakpoint_count = 0;
61 gboolean sh4_starting = FALSE;
/* When TRUE, execute via the binary translator rather than the interpreter. */
62 static gboolean sh4_use_translator = FALSE;
/* Jump target for sudden VM exits (see sh4_core_exit / sh4_run_slice). */
63 static jmp_buf sh4_exit_jmp_buf;
64 static gboolean sh4_running = FALSE;
65 struct sh4_icache_struct sh4_icache = { NULL, -1, -1, 0 };
/* Select or deselect the binary translator.
 * NOTE(review): this listing elides several lines here -- presumably an
 * #ifdef SH4_TRANSLATOR guard around the assignment (matching the "No-op if
 * the translator was not built" comment); confirm against the full source. */
67 void sh4_translate_set_enabled( gboolean use )
69 // No-op if the translator was not built
74 sh4_use_translator = use;
/* Report whether the binary translator is currently selected. */
78 gboolean sh4_translate_is_enabled()
80 return sh4_use_translator;
/* NOTE(review): interior of sh4_init() -- the signature and surrounding lines
 * are elided in this listing.  Registers the SH4 on-chip MMIO regions and
 * (when built with ENABLE_SH4STATS) presumably initializes the statistics
 * module; confirm against the full source. */
85 register_io_regions( mmio_list_sh4mmio );
90 #ifdef ENABLE_SH4STATS
/* Hard (power-on) reset: clear all CPU state and load the architectural reset
 * values.  NOTE(review): several lines are elided in this listing (the
 * translator-flush branch body, and the peripheral resets after the
 * "Peripheral modules" comment); confirm against the full source. */
100 void sh4_poweron_reset(void)
102 /* zero everything out, for the sake of having a consistent state. */
103 memset( &sh4r, 0, sizeof(sh4r) );
104 if( sh4_use_translator ) {
108 /* Resume running if we were halted */
109 sh4r.sh4_state = SH4_STATE_RUNNING;
/* Architectural reset vector: PC = 0xA0000000 (P2, uncached boot ROM). */
111 sh4r.pc = 0xA0000000;
112 sh4r.new_pc= 0xA0000002;
113 sh4r.vbr = 0x00000000;
114 sh4r.fpscr = 0x00040001;
/* SR reset value: MD|RB|BL set, interrupts masked (via sh4_write_sr so the
 * cached flag bits and bank state stay consistent). */
115 sh4_write_sr(0x700000F0);
117 /* Mem reset will do this, but if we want to reset _just_ the SH4... */
118 MMIO_WRITE( MMU, EXPEVT, EXC_POWER_RESET );
120 /* Peripheral modules */
/* NOTE(review): interior of (presumably) sh4_stop() -- its signature is elided
 * in this listing.  When running translated code, new_pc/in_delay_slot are not
 * kept up to date, so they are normalized here before the core goes idle. */
132 if( sh4_use_translator ) {
133 /* If we were running with the translator, update new_pc and in_delay_slot */
134 sh4r.new_pc = sh4r.pc+2;
135 sh4r.in_delay_slot = FALSE;
/* NOTE(review): the header comment above appears stale -- this function
 * dispatches between the translator and the emulator (see the #ifdef below),
 * it is not translator-only.  Several case bodies and the final return are
 * elided in this listing; confirm against the full source. */
141 * Execute a timeslice using translated code only (ie translate/execute loop)
143 uint32_t sh4_run_slice( uint32_t nanosecs )
145 sh4r.slice_cycle = 0;
/* If the CPU is sleeping/standby, run the peripheral-only slice instead. */
147 if( sh4r.sh4_state != SH4_STATE_RUNNING ) {
148 sh4_sleep_run_slice(nanosecs);
151 /* Setup for sudden vm exits */
/* setjmp returns 0 on the initial call; nonzero CORE_EXIT_* codes arrive via
 * longjmp from sh4_core_exit(). */
152 switch( setjmp(sh4_exit_jmp_buf) ) {
153 case CORE_EXIT_BREAKPOINT:
/* One-shot breakpoints are consumed as soon as they fire. */
154 sh4_clear_breakpoint( sh4r.pc, BREAK_ONESHOT );
/* Bring peripherals up to the point at which we stopped, then bail out. */
157 if( sh4r.sh4_state != SH4_STATE_STANDBY ) {
158 TMU_run_slice( sh4r.slice_cycle );
159 SCIF_run_slice( sh4r.slice_cycle );
160 PMM_run_slice( sh4r.slice_cycle );
162 return sh4r.slice_cycle;
164 case CORE_EXIT_SYSRESET:
167 case CORE_EXIT_SLEEP:
168 sh4_sleep_run_slice(nanosecs);
170 case CORE_EXIT_FLUSH_ICACHE:
177 /* Execute the core's real slice */
178 #ifdef SH4_TRANSLATOR
179 if( sh4_use_translator ) {
180 sh4_translate_run_slice(nanosecs);
182 sh4_emulate_run_slice(nanosecs);
185 sh4_emulate_run_slice(nanosecs);
188 /* And finish off the peripherals afterwards */
191 sh4_starting = FALSE;
192 sh4r.slice_cycle = nanosecs;
193 if( sh4r.sh4_state != SH4_STATE_STANDBY ) {
194 TMU_run_slice( nanosecs );
195 SCIF_run_slice( nanosecs );
/* NOTE(review): PMM gets sh4r.slice_cycle where TMU/SCIF get nanosecs --
 * equivalent here since slice_cycle was just set to nanosecs, but the
 * inconsistency is worth normalizing. */
196 PMM_run_slice( sh4r.slice_cycle );
/* Abort the current slice immediately with the given CORE_EXIT_* code, by
 * longjmp-ing back into the setjmp in sh4_run_slice.  When running translated
 * code, first recover a consistent interpreter-visible CPU state.
 * NOTE(review): else-branches and closing braces are elided in this listing. */
201 void sh4_core_exit( int exit_code )
204 #ifdef SH4_TRANSLATOR
205 if( sh4_use_translator ) {
/* Exception exits need different unwinding than ordinary exits. */
206 if( exit_code == CORE_EXIT_EXCEPTION ) {
207 sh4_translate_exception_exit_recover();
209 sh4_translate_exit_recover();
/* For exits taken mid-instruction (other than exception/breakpoint), complete
 * the current instruction so PC and flags are coherent before leaving. */
213 if( exit_code != CORE_EXIT_EXCEPTION &&
214 exit_code != CORE_EXIT_BREAKPOINT ) {
215 sh4_finalize_instruction();
217 // longjmp back into sh4_run_slice
219 longjmp(sh4_exit_jmp_buf, exit_code);
/* Serialize CPU + on-chip peripheral state to the open save-state file.
 * Only the portion of sh4r before xlat_sh4_mode is written (the rest is
 * derived/translator state, rebuilt on load).
 * NOTE(review): the fwrite return value is unchecked, and intervening
 * *_save_state calls (MMU etc.) are elided in this listing. */
223 void sh4_save_state( FILE *f )
225 if( sh4_use_translator ) {
226 /* If we were running with the translator, update new_pc and in_delay_slot */
227 sh4r.new_pc = sh4r.pc+2;
228 sh4r.in_delay_slot = FALSE;
231 fwrite( &sh4r, offsetof(struct sh4_registers, xlat_sh4_mode), 1, f );
235 INTC_save_state( f );
237 SCIF_save_state( f );
/* Restore CPU + peripheral state from a save-state file; returns the result
 * of the last loader (0 presumably meaning success -- confirm convention).
 * NOTE(review): the fread return value is unchecked (a short/corrupt file
 * would go undetected here), and the translator-flush branch body plus other
 * *_load_state calls are elided in this listing. */
240 int sh4_load_state( FILE * f )
242 if( sh4_use_translator ) {
245 fread( &sh4r, offsetof(struct sh4_registers, xlat_sh4_mode), 1, f );
/* Rebuild the derived translator mode word from the freshly-loaded SR/FPSCR. */
246 sh4r.xlat_sh4_mode = (sh4r.sr & SR_MD) | (sh4r.fpscr & (FPSCR_SZ|FPSCR_PR));
250 INTC_load_state( f );
252 return SCIF_load_state( f );
255 void sh4_set_breakpoint( uint32_t pc, breakpoint_type_t type )
257 sh4_breakpoints[sh4_breakpoint_count].address = pc;
258 sh4_breakpoints[sh4_breakpoint_count].type = type;
259 if( sh4_use_translator ) {
260 xlat_invalidate_word( pc );
262 sh4_breakpoint_count++;
/* Remove the first breakpoint matching (pc, type).  Returns TRUE if one was
 * found and removed, FALSE otherwise -- NOTE(review): the return statements
 * themselves are elided in this listing; confirm against the full source. */
265 gboolean sh4_clear_breakpoint( uint32_t pc, breakpoint_type_t type )
269 for( i=0; i<sh4_breakpoint_count; i++ ) {
270 if( sh4_breakpoints[i].address == pc &&
271 sh4_breakpoints[i].type == type ) {
/* Found: shift the remaining entries down over the removed slot. */
272 while( ++i < sh4_breakpoint_count ) {
273 sh4_breakpoints[i-1].address = sh4_breakpoints[i].address;
274 sh4_breakpoints[i-1].type = sh4_breakpoints[i].type;
/* Force retranslation of the affected word now the breakpoint is gone. */
276 if( sh4_use_translator ) {
277 xlat_invalidate_word( pc );
279 sh4_breakpoint_count--;
/* Return the type of the breakpoint set at pc.  NOTE(review): the not-found
 * return (presumably 0) and surrounding braces are elided in this listing. */
286 int sh4_get_breakpoint( uint32_t pc )
289 for( i=0; i<sh4_breakpoint_count; i++ ) {
290 if( sh4_breakpoints[i].address == pc )
291 return sh4_breakpoints[i].type;
/* Set the program counter from outside the core (debugger/GUI entry point).
 * NOTE(review): the body is entirely elided in this listing -- presumably it
 * sets sh4r.pc and the related new_pc/delay-slot state; confirm. */
296 void sh4_set_pc( int pc )
303 /******************************* Support methods ***************************/
305 static void sh4_switch_banks( )
309 memcpy( tmp, sh4r.r, sizeof(uint32_t)*8 );
310 memcpy( sh4r.r, sh4r.r_bank, sizeof(uint32_t)*8 );
311 memcpy( sh4r.r_bank, tmp, sizeof(uint32_t)*8 );
314 void FASTCALL sh4_switch_fr_banks()
317 for( i=0; i<16; i++ ) {
318 float tmp = sh4r.fr[0][i];
319 sh4r.fr[0][i] = sh4r.fr[1][i];
/* Store a new SR value: switch register banks if the MD/RB combination
 * changes, mask to the architecturally-defined bits, unpack the T/S/M/Q flag
 * bits into their cached fields, and refresh the translator mode word.
 * NOTE(review): the body of the bank-switch `if` is elided in this listing
 * (presumably a call to sh4_switch_banks()); confirm against the full source. */
324 void FASTCALL sh4_write_sr( uint32_t newval )
/* Bank is selected only when both MD and RB are set (privileged + bank 1). */
326 int oldbank = (sh4r.sr&SR_MDRB) == SR_MDRB;
327 int newbank = (newval&SR_MDRB) == SR_MDRB;
328 if( oldbank != newbank )
330 sh4r.sr = newval & SR_MASK;
/* Cache the individual flag bits for fast access by the core. */
331 sh4r.t = (newval&SR_T) ? 1 : 0;
332 sh4r.s = (newval&SR_S) ? 1 : 0;
333 sh4r.m = (newval&SR_M) ? 1 : 0;
334 sh4r.q = (newval&SR_Q) ? 1 : 0;
335 sh4r.xlat_sh4_mode = (sh4r.sr & SR_MD) | (sh4r.fpscr & (FPSCR_SZ|FPSCR_PR));
339 void FASTCALL sh4_write_fpscr( uint32_t newval )
341 if( (sh4r.fpscr ^ newval) & FPSCR_FR ) {
342 sh4_switch_fr_banks();
344 sh4r.fpscr = newval & FPSCR_MASK;
345 sh4r.xlat_sh4_mode = (sh4r.sr & SR_MD) | (sh4r.fpscr & (FPSCR_SZ|FPSCR_PR));
348 uint32_t FASTCALL sh4_read_sr( void )
350 /* synchronize sh4r.sr with the various bitflags */
351 sh4r.sr &= SR_MQSTMASK;
352 if( sh4r.t ) sh4r.sr |= SR_T;
353 if( sh4r.s ) sh4r.sr |= SR_S;
354 if( sh4r.m ) sh4r.sr |= SR_M;
355 if( sh4r.q ) sh4r.sr |= SR_Q;
360 * Raise a CPU reset exception with the specified exception code.
362 void FASTCALL sh4_raise_reset( int code )
/* Record the cause, then jump to the reset vector at 0xA0000000 with VBR
 * cleared. */
364 MMIO_WRITE(MMU,EXPEVT,code);
365 sh4r.vbr = 0x00000000;
366 sh4r.pc = 0xA0000000;
367 sh4r.new_pc = sh4r.pc + 2;
368 sh4r.in_delay_slot = 0;
/* Enter privileged mode with exceptions blocked and FPU enabled (FD clear). */
369 sh4_write_sr( (sh4r.sr|SR_MD|SR_BL|SR_RB|SR_IMASK)&(~SR_FD) );
/* NOTE(review): the lines following this comment (presumably the peripheral
 * reset calls themselves) are elided in this listing; confirm. */
371 /* Peripheral manual reset (FIXME: incomplete) */
377 void FASTCALL sh4_raise_tlb_multihit( sh4vma_t vpn )
379 MMIO_WRITE( MMU, TEA, vpn );
380 MMIO_WRITE( MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)) );
381 sh4_raise_reset( EXC_TLB_MULTI_HIT );
385 * Raise a general CPU exception for the specified exception code.
386 * (NOT for TRAPA or TLB exceptions)
388 void FASTCALL sh4_raise_exception( int code )
/* With exceptions blocked (SR.BL), a further exception escalates to a manual
 * reset.  NOTE(review): the lines between this branch and the save of SSR
 * (presumably a return/else and possibly an SPC save) are elided in this
 * listing; confirm against the full source. */
390 if( sh4r.sr & SR_BL ) {
391 sh4_raise_reset( EXC_MANUAL_RESET );
/* Save return context (SR, R15), record the cause, and vector via VBR. */
394 sh4r.ssr = sh4_read_sr();
395 sh4r.sgr = sh4r.r[15];
396 MMIO_WRITE(MMU,EXPEVT, code);
397 sh4r.pc = sh4r.vbr + EXV_EXCEPTION;
398 sh4r.new_pc = sh4r.pc + 2;
399 sh4_write_sr( sh4r.ssr |SR_MD|SR_BL|SR_RB );
400 sh4r.in_delay_slot = 0;
/* Take a TRAPA exception: record the trap number (<<2, as the hardware stores
 * it in TRA), save the return context, and vector to VBR+EXV_EXCEPTION.
 * NOTE(review): one line is elided between the EXPEVT write and the SSR save
 * -- likely the SPC (saved PC) assignment; confirm against the full source. */
404 void FASTCALL sh4_raise_trap( int trap )
406 MMIO_WRITE( MMU, TRA, trap<<2 );
407 MMIO_WRITE( MMU, EXPEVT, EXC_TRAP );
409 sh4r.ssr = sh4_read_sr();
410 sh4r.sgr = sh4r.r[15];
411 sh4r.pc = sh4r.vbr + EXV_EXCEPTION;
412 sh4r.new_pc = sh4r.pc + 2;
/* Re-enter privileged mode with exceptions blocked, preserving other SSR bits. */
413 sh4_write_sr( sh4r.ssr |SR_MD|SR_BL|SR_RB );
414 sh4r.in_delay_slot = 0;
/* Take a TLB miss/protection exception: latch the faulting address into
 * TEA/PTEH, record the cause, save return context, and vector to the TLB miss
 * handler at VBR+EXV_TLBMISS.  NOTE(review): one line is elided between the
 * EXPEVT write and the SSR save -- likely the SPC assignment; confirm. */
417 void FASTCALL sh4_raise_tlb_exception( int code, sh4vma_t vpn )
419 MMIO_WRITE( MMU, TEA, vpn );
/* PTEH keeps its low 10 bits (ASID etc.); the VPN field is replaced. */
420 MMIO_WRITE( MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)) );
421 MMIO_WRITE( MMU, EXPEVT, code );
423 sh4r.ssr = sh4_read_sr();
424 sh4r.sgr = sh4r.r[15];
425 sh4r.pc = sh4r.vbr + EXV_TLBMISS;
426 sh4r.new_pc = sh4r.pc + 2;
427 sh4_write_sr( sh4r.ssr |SR_MD|SR_BL|SR_RB );
428 sh4r.in_delay_slot = 0;
/* Accept the highest-priority pending interrupt from the INTC: record its
 * code in INTEVT, save return context, and vector to VBR+0x600 (the interrupt
 * vector offset).  NOTE(review): one line is elided between the SSR and SGR
 * saves -- likely the SPC assignment; confirm against the full source. */
431 void FASTCALL sh4_accept_interrupt( void )
433 uint32_t code = intc_accept_interrupt();
434 MMIO_WRITE( MMU, INTEVT, code );
435 sh4r.ssr = sh4_read_sr();
437 sh4r.sgr = sh4r.r[15];
438 sh4_write_sr( sh4r.ssr|SR_BL|SR_MD|SR_RB );
439 sh4r.pc = sh4r.vbr + 0x600;
440 sh4r.new_pc = sh4r.pc + 2;
441 sh4r.in_delay_slot = 0;
444 void FASTCALL signsat48( void )
446 if( ((int64_t)sh4r.mac) < (int64_t)0xFFFF800000000000LL )
447 sh4r.mac = 0xFFFF800000000000LL;
448 else if( ((int64_t)sh4r.mac) > (int64_t)0x00007FFFFFFFFFFFLL )
449 sh4r.mac = 0x00007FFFFFFFFFFFLL;
/* FSCA instruction helper: convert the 16-bit fixed-point angle (1 revolution
 * = 0x10000) to radians.  NOTE(review): the remainder of the body (presumably
 * the sin/cos stores into fr[]) is elided in this listing; confirm. */
452 void FASTCALL sh4_fsca( uint32_t anglei, float *fr )
454 float angle = (((float)(anglei&0xFFFF))/65536.0) * 2 * M_PI;
460 * Enter sleep mode (eg by executing a SLEEP instruction).
461 * Sets sh4_state appropriately and ensures any stopping peripheral modules
/* NOTE(review): the else-chain connectives and closing braces between the
 * three state cases are elided in this listing; the visible structure reads as
 * STBCR bit 7 -> STANDBY (peripherals halted), else STBCR2 bit 7 -> DEEP_SLEEP,
 * else plain SLEEP.  Confirm against the full source. */
464 void FASTCALL sh4_sleep(void)
466 if( MMIO_READ( CPG, STBCR ) & 0x80 ) {
467 sh4r.sh4_state = SH4_STATE_STANDBY;
468 /* Bring all running peripheral modules up to date, and then halt them. */
469 TMU_run_slice( sh4r.slice_cycle );
470 SCIF_run_slice( sh4r.slice_cycle );
471 PMM_run_slice( sh4r.slice_cycle );
473 if( MMIO_READ( CPG, STBCR2 ) & 0x80 ) {
474 sh4r.sh4_state = SH4_STATE_DEEP_SLEEP;
475 /* Halt DMAC but other peripherals still running */
478 sh4r.sh4_state = SH4_STATE_SLEEP;
/* Abort the current slice; sh4_run_slice will service the sleep state. */
481 sh4_core_exit( CORE_EXIT_SLEEP );
485 * Wakeup following sleep mode (IRQ or reset). Sets state back to running,
486 * and restarts any peripheral devices that were stopped.
488 void sh4_wakeup(void)
/* NOTE(review): the per-state restart bodies (what each case does before
 * falling through/breaking) are elided in this listing; confirm. */
490 switch( sh4r.sh4_state ) {
491 case SH4_STATE_STANDBY:
493 case SH4_STATE_DEEP_SLEEP:
495 case SH4_STATE_SLEEP:
498 sh4r.sh4_state = SH4_STATE_RUNNING;
502 * Run a time slice (or portion of a timeslice) while the SH4 is sleeping.
503 * Returns when either the SH4 wakes up (interrupt received) or the end of
504 * the slice is reached. Updates sh4.slice_cycle with the exit time and
505 * returns the same value.
507 uint32_t sh4_sleep_run_slice( uint32_t nanosecs )
509 int sleep_state = sh4r.sh4_state;
/* Must only be called while sleeping/standing by. */
510 assert( sleep_state != SH4_STATE_RUNNING );
/* Advance directly from event to event until one wakes the CPU or the slice
 * ends.  NOTE(review): the event-dispatch bodies (what runs for PENDING_EVENT,
 * and the wake-up path for PENDING_IRQ before the return) are elided in this
 * listing; confirm against the full source. */
512 while( sh4r.event_pending < nanosecs ) {
513 sh4r.slice_cycle = sh4r.event_pending;
514 if( sh4r.event_types & PENDING_EVENT ) {
517 if( sh4r.event_types & PENDING_IRQ ) {
519 return sh4r.slice_cycle;
/* No wake-up: consume the whole slice while asleep. */
522 sh4r.slice_cycle = nanosecs;
523 return sh4r.slice_cycle;
528 * Compute the matrix tranform of fv given the matrix xf.
529 * Both fv and xf are word-swapped as per the sh4r.fr banks
531 void FASTCALL sh4_ftrv( float *target )
533 float fv[4] = { target[1], target[0], target[3], target[2] };
534 target[1] = sh4r.fr[1][1] * fv[0] + sh4r.fr[1][5]*fv[1] +
535 sh4r.fr[1][9]*fv[2] + sh4r.fr[1][13]*fv[3];
536 target[0] = sh4r.fr[1][0] * fv[0] + sh4r.fr[1][4]*fv[1] +
537 sh4r.fr[1][8]*fv[2] + sh4r.fr[1][12]*fv[3];
538 target[3] = sh4r.fr[1][3] * fv[0] + sh4r.fr[1][7]*fv[1] +
539 sh4r.fr[1][11]*fv[2] + sh4r.fr[1][15]*fv[3];
540 target[2] = sh4r.fr[1][2] * fv[0] + sh4r.fr[1][6]*fv[1] +
541 sh4r.fr[1][10]*fv[2] + sh4r.fr[1][14]*fv[3];
/* Test whether the given virtual address translates (for disassembly purposes)
 * to a physical address backed by real memory.  NOTE(review): the closing
 * lines of this function fall past the end of this listing. */
544 gboolean sh4_has_page( sh4vma_t vma )
546 sh4addr_t addr = mmu_vma_to_phys_disasm(vma);
547 return addr != MMU_VMA_ERROR && mem_has_page(addr);
.