filename | src/sh4/sh4.c |
changeset | 953:f4a156508ad1 |
prev | 905:4c17ebd9ef5e |
next | 968:6fb1481859a4 |
author | nkeynes |
date | Thu Jan 15 03:54:21 2009 +0000 (15 years ago) |
permissions | -rw-r--r-- |
last change | Fix missing prototype for mmu_vma_to_phys_disasm Fix missing return value in mmu_ext_page_remapped |
view | annotate | diff | log | raw |
1 /**
2 * $Id$
3 *
4 * SH4 parent module for all CPU modes and SH4 peripheral
5 * modules.
6 *
7 * Copyright (c) 2005 Nathan Keynes.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
20 #define MODULE sh4_module
21 #include <math.h>
22 #include <setjmp.h>
23 #include <assert.h>
24 #include "lxdream.h"
25 #include "dreamcast.h"
26 #include "mem.h"
27 #include "clock.h"
28 #include "eventq.h"
29 #include "syscall.h"
30 #include "sh4/intc.h"
31 #include "sh4/sh4core.h"
32 #include "sh4/sh4mmio.h"
33 #include "sh4/sh4stat.h"
34 #include "sh4/sh4trans.h"
35 #include "sh4/xltcache.h"
/* Forward declarations for the module entry points wired into sh4_module below */
void sh4_init( void );
void sh4_xlat_init( void );
void sh4_poweron_reset( void );
void sh4_start( void );
void sh4_stop( void );
void sh4_save_state( FILE *f );
int sh4_load_state( FILE *f );

uint32_t sh4_run_slice( uint32_t );
uint32_t sh4_xlat_run_slice( uint32_t );

/**
 * Module descriptor registered with the dreamcast core: names the module and
 * supplies its init/reset/start/run/stop and save/load-state entry points.
 */
struct dreamcast_module sh4_module = { "SH4", sh4_init, sh4_poweron_reset,
        sh4_start, sh4_run_slice, sh4_stop,
        sh4_save_state, sh4_load_state };
/* The complete SH4 register file. 16-byte aligned (presumably for SSE access
 * from the translator — TODO confirm). */
struct sh4_registers sh4r __attribute__((aligned(16)));
/* Active breakpoints; the first sh4_breakpoint_count entries are valid */
struct breakpoint_struct sh4_breakpoints[MAX_BREAKPOINTS];
int sh4_breakpoint_count = 0;
gboolean sh4_starting = FALSE;          /* TRUE from sh4_start() until the first slice completes */
static gboolean sh4_use_translator = FALSE;  /* Run via the binary translator rather than the emulator */
static jmp_buf sh4_exit_jmp_buf;        /* longjmp target for sudden core exits (see sh4_core_exit) */
static gboolean sh4_running = FALSE;    /* TRUE only while inside sh4_run_slice's core execution */
struct sh4_icache_struct sh4_icache = { NULL, -1, -1, 0 };
62 void sh4_translate_set_enabled( gboolean use )
63 {
64 // No-op if the translator was not built
65 #ifdef SH4_TRANSLATOR
66 if( use ) {
67 sh4_translate_init();
68 }
69 sh4_use_translator = use;
70 #endif
71 }
73 gboolean sh4_translate_is_enabled()
74 {
75 return sh4_use_translator;
76 }
/**
 * One-time module initialization: register the SH4 MMIO regions, initialize
 * the MMU, TMU and translation cache, then apply a power-on reset to bring
 * everything to a consistent initial state.
 */
void sh4_init(void)
{
    register_io_regions( mmio_list_sh4mmio );
    MMU_init();
    TMU_init();
    xlat_cache_init();
    sh4_poweron_reset();
#ifdef ENABLE_SH4STATS
    sh4_stats_reset();
#endif
}
/**
 * Module start hook: flag that execution is (re)starting. The flag is
 * cleared at the end of the first sh4_run_slice().
 */
void sh4_start(void)
{
    sh4_starting = TRUE;
}
/**
 * Full power-on reset of the SH4 and its on-chip peripheral modules:
 * clears the register file, flushes any translated code, and sets the
 * architectural reset values for PC, VBR, FPSCR and SR.
 */
void sh4_poweron_reset(void)
{
    /* zero everything out, for the sake of having a consistent state. */
    memset( &sh4r, 0, sizeof(sh4r) );
    if( sh4_use_translator ) {
        xlat_flush_cache();
    }

    /* Resume running if we were halted */
    sh4r.sh4_state = SH4_STATE_RUNNING;

    /* Architectural reset vector and register values */
    sh4r.pc    = 0xA0000000;
    sh4r.new_pc= 0xA0000002;
    sh4r.vbr   = 0x00000000;
    sh4r.fpscr = 0x00040001;
    sh4_write_sr(0x700000F0);

    /* Mem reset will do this, but if we want to reset _just_ the SH4... */
    MMIO_WRITE( MMU, EXPEVT, EXC_POWER_RESET );

    /* Peripheral modules */
    CPG_reset();
    INTC_reset();
    PMM_reset();
    TMU_reset();
    SCIF_reset();
    MMU_reset();
}
124 void sh4_stop(void)
125 {
126 if( sh4_use_translator ) {
127 /* If we were running with the translator, update new_pc and in_delay_slot */
128 sh4r.new_pc = sh4r.pc+2;
129 sh4r.in_delay_slot = FALSE;
130 }
132 }
/**
 * Execute one timeslice of nanosecs length on the SH4, using either the
 * translator core or the emulation core (whichever is selected), then bring
 * the time-driven peripheral modules (TMU/SCIF/PMM) up to the end of the
 * slice. Sudden exits from the core (breakpoint, halt, reset, sleep, icache
 * flush) arrive here via longjmp to the setjmp below.
 *
 * @return the number of nanoseconds actually executed (normally nanosecs,
 *         less if the core halted mid-slice).
 */
uint32_t sh4_run_slice( uint32_t nanosecs )
{
    sh4r.slice_cycle = 0;

    /* If we start the slice asleep, run the sleep loop first; it returns
     * early (with slice_cycle updated) if an IRQ wakes the CPU up. */
    if( sh4r.sh4_state != SH4_STATE_RUNNING ) {
        sh4_sleep_run_slice(nanosecs);
    }

    /* Setup for sudden vm exits */
    switch( setjmp(sh4_exit_jmp_buf) ) {
    case CORE_EXIT_BREAKPOINT:
        /* Oneshot breakpoints clear themselves when hit */
        sh4_clear_breakpoint( sh4r.pc, BREAK_ONESHOT );
        /* fallthrough */
    case CORE_EXIT_HALT:
        if( sh4r.sh4_state != SH4_STATE_STANDBY ) {
            /* Bring peripherals up to the exit point, then stop the system */
            TMU_run_slice( sh4r.slice_cycle );
            SCIF_run_slice( sh4r.slice_cycle );
            PMM_run_slice( sh4r.slice_cycle );
            dreamcast_stop();
            return sh4r.slice_cycle;
        }
        /* NOTE(review): when halted while in STANDBY this falls through to
         * the SYSRESET case and performs a full dreamcast_reset() — confirm
         * that is intentional and not a missing break. */
    case CORE_EXIT_SYSRESET:
        dreamcast_reset();
        break;
    case CORE_EXIT_SLEEP:
        sh4_sleep_run_slice(nanosecs);
        break;
    case CORE_EXIT_FLUSH_ICACHE:
        xlat_flush_cache();
        break;
    }

    sh4_running = TRUE;

    /* Execute the core's real slice */
#ifdef SH4_TRANSLATOR
    if( sh4_use_translator ) {
        sh4_translate_run_slice(nanosecs);
    } else {
        sh4_emulate_run_slice(nanosecs);
    }
#else
    sh4_emulate_run_slice(nanosecs);
#endif

    /* And finish off the peripherals afterwards */

    sh4_running = FALSE;
    sh4_starting = FALSE;
    sh4r.slice_cycle = nanosecs;
    if( sh4r.sh4_state != SH4_STATE_STANDBY ) {
        TMU_run_slice( nanosecs );
        SCIF_run_slice( nanosecs );
        PMM_run_slice( sh4r.slice_cycle );
    }
    return nanosecs;
}
/**
 * Abort the currently-executing core slice and longjmp back into
 * sh4_run_slice with the given exit code. No-op if the core is not
 * actually running (so it is safe to call from e.g. GUI context).
 *
 * @param exit_code one of the CORE_EXIT_* codes dispatched by the
 *        setjmp switch in sh4_run_slice.
 */
void sh4_core_exit( int exit_code )
{
    if( sh4_running ) {
#ifdef SH4_TRANSLATOR
        /* Let the translator unwind its generated-code frame first */
        if( sh4_use_translator ) {
            if( exit_code == CORE_EXIT_EXCEPTION ) {
                sh4_translate_exception_exit_recover();
            } else {
                sh4_translate_exit_recover();
            }
        }
#endif
        /* Exceptions re-enter at the handler; everything else needs the
         * current instruction's side effects committed before leaving */
        if( exit_code != CORE_EXIT_EXCEPTION ) {
            sh4_finalize_instruction();
        }
        // longjmp back into sh4_run_slice
        sh4_running = FALSE;
        longjmp(sh4_exit_jmp_buf, exit_code);
    }
}
/**
 * Serialize the SH4 register file (up to but excluding the translator-private
 * fields starting at xlat_sh4_mode) and all peripheral module states to f.
 *
 * NOTE(review): the fwrite return value is unchecked, so a write failure is
 * silent here; the caller presumably detects it via the stream's error flag.
 */
void sh4_save_state( FILE *f )
{
    if( sh4_use_translator ) {
        /* If we were running with the translator, update new_pc and in_delay_slot */
        sh4r.new_pc = sh4r.pc+2;
        sh4r.in_delay_slot = FALSE;
    }

    fwrite( &sh4r, offsetof(struct sh4_registers, xlat_sh4_mode), 1, f );
    MMU_save_state( f );
    CCN_save_state( f );
    PMM_save_state( f );
    INTC_save_state( f );
    TMU_save_state( f );
    SCIF_save_state( f );
}
233 int sh4_load_state( FILE * f )
234 {
235 if( sh4_use_translator ) {
236 xlat_flush_cache();
237 }
238 fread( &sh4r, offsetof(struct sh4_registers, xlat_sh4_mode), 1, f );
239 sh4r.xlat_sh4_mode = (sh4r.sr & SR_MD) | (sh4r.fpscr & (FPSCR_SZ|FPSCR_PR));
240 MMU_load_state( f );
241 CCN_load_state( f );
242 PMM_load_state( f );
243 INTC_load_state( f );
244 TMU_load_state( f );
245 return SCIF_load_state( f );
246 }
248 void sh4_set_breakpoint( uint32_t pc, breakpoint_type_t type )
249 {
250 sh4_breakpoints[sh4_breakpoint_count].address = pc;
251 sh4_breakpoints[sh4_breakpoint_count].type = type;
252 if( sh4_use_translator ) {
253 xlat_invalidate_word( pc );
254 }
255 sh4_breakpoint_count++;
256 }
258 gboolean sh4_clear_breakpoint( uint32_t pc, breakpoint_type_t type )
259 {
260 int i;
262 for( i=0; i<sh4_breakpoint_count; i++ ) {
263 if( sh4_breakpoints[i].address == pc &&
264 sh4_breakpoints[i].type == type ) {
265 while( ++i < sh4_breakpoint_count ) {
266 sh4_breakpoints[i-1].address = sh4_breakpoints[i].address;
267 sh4_breakpoints[i-1].type = sh4_breakpoints[i].type;
268 }
269 if( sh4_use_translator ) {
270 xlat_invalidate_word( pc );
271 }
272 sh4_breakpoint_count--;
273 return TRUE;
274 }
275 }
276 return FALSE;
277 }
279 int sh4_get_breakpoint( uint32_t pc )
280 {
281 int i;
282 for( i=0; i<sh4_breakpoint_count; i++ ) {
283 if( sh4_breakpoints[i].address == pc )
284 return sh4_breakpoints[i].type;
285 }
286 return 0;
287 }
/**
 * Set the program counter directly, with new_pc following two bytes behind
 * (one instruction) as the core expects.
 */
void sh4_set_pc( int pc )
{
    sh4r.pc = pc;
    sh4r.new_pc = pc+2;
}
296 /******************************* Support methods ***************************/
298 static void sh4_switch_banks( )
299 {
300 uint32_t tmp[8];
302 memcpy( tmp, sh4r.r, sizeof(uint32_t)*8 );
303 memcpy( sh4r.r, sh4r.r_bank, sizeof(uint32_t)*8 );
304 memcpy( sh4r.r_bank, tmp, sizeof(uint32_t)*8 );
305 }
307 void FASTCALL sh4_switch_fr_banks()
308 {
309 int i;
310 for( i=0; i<16; i++ ) {
311 float tmp = sh4r.fr[0][i];
312 sh4r.fr[0][i] = sh4r.fr[1][i];
313 sh4r.fr[1][i] = tmp;
314 }
315 }
/**
 * Store a new SR value, performing all the associated side effects:
 * switches the register bank if MD/RB changed, refreshes the cached
 * one-bit flags (t/s/m/q), recomputes the translator mode word, and
 * re-evaluates pending interrupts against the (possibly changed) IMASK.
 */
void FASTCALL sh4_write_sr( uint32_t newval )
{
    /* Bank selection is active only when both MD and RB are set */
    int oldbank = (sh4r.sr&SR_MDRB) == SR_MDRB;
    int newbank = (newval&SR_MDRB) == SR_MDRB;
    if( oldbank != newbank )
        sh4_switch_banks();
    sh4r.sr = newval & SR_MASK;
    /* Mirror the flag bits into their fast-access cached copies */
    sh4r.t = (newval&SR_T) ? 1 : 0;
    sh4r.s = (newval&SR_S) ? 1 : 0;
    sh4r.m = (newval&SR_M) ? 1 : 0;
    sh4r.q = (newval&SR_Q) ? 1 : 0;
    sh4r.xlat_sh4_mode = (sh4r.sr & SR_MD) | (sh4r.fpscr & (FPSCR_SZ|FPSCR_PR));
    intc_mask_changed();
}
/**
 * Store a new FPSCR value: switches the FP register bank if FR changed,
 * and recomputes the translator mode word from the new SZ/PR bits.
 */
void FASTCALL sh4_write_fpscr( uint32_t newval )
{
    if( (sh4r.fpscr ^ newval) & FPSCR_FR ) {
        sh4_switch_fr_banks();
    }
    sh4r.fpscr = newval & FPSCR_MASK;
    sh4r.xlat_sh4_mode = (sh4r.sr & SR_MD) | (sh4r.fpscr & (FPSCR_SZ|FPSCR_PR));
}
341 uint32_t FASTCALL sh4_read_sr( void )
342 {
343 /* synchronize sh4r.sr with the various bitflags */
344 sh4r.sr &= SR_MQSTMASK;
345 if( sh4r.t ) sh4r.sr |= SR_T;
346 if( sh4r.s ) sh4r.sr |= SR_S;
347 if( sh4r.m ) sh4r.sr |= SR_M;
348 if( sh4r.q ) sh4r.sr |= SR_Q;
349 return sh4r.sr;
350 }
/**
 * Raise a CPU reset exception with the specified exception code: jumps to
 * the reset vector with VBR cleared and a privileged, blocked SR, and
 * applies a manual reset to the peripherals affected by a CPU reset.
 */
void FASTCALL sh4_raise_reset( int code )
{
    MMIO_WRITE(MMU,EXPEVT,code);
    sh4r.vbr = 0x00000000;
    sh4r.pc = 0xA0000000;       /* reset vector */
    sh4r.new_pc = sh4r.pc + 2;
    sh4r.in_delay_slot = 0;
    /* MD=1, BL=1, RB=1, interrupts masked, FPU enabled (FD cleared) */
    sh4_write_sr( (sh4r.sr|SR_MD|SR_BL|SR_RB|SR_IMASK)&(~SR_FD) );

    /* Peripheral manual reset (FIXME: incomplete) */
    INTC_reset();
    SCIF_reset();
    MMU_reset();
}
/**
 * Raise a TLB multiple-hit exception for the given virtual page: records
 * the faulting address in TEA/PTEH, then takes the reset path (a multi-hit
 * is a reset-class exception on the SH4).
 */
void FASTCALL sh4_raise_tlb_multihit( sh4vma_t vpn )
{
    MMIO_WRITE( MMU, TEA, vpn );
    /* Keep the low 10 bits (ASID etc), replace the VPN field */
    MMIO_WRITE( MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)) );
    sh4_raise_reset( EXC_TLB_MULTI_HIT );
}
/**
 * Raise a general CPU exception for the specified exception code.
 * (NOT for TRAPA or TLB exceptions)
 *
 * Saves PC/SR/R15 into SPC/SSR/SGR, jumps to VBR+0x100 with exceptions
 * blocked. If exceptions are already blocked (SR.BL), escalates to a
 * manual reset instead, per the SH4 exception model.
 */
void FASTCALL sh4_raise_exception( int code )
{
    if( sh4r.sr & SR_BL ) {
        sh4_raise_reset( EXC_MANUAL_RESET );
    } else {
        sh4r.spc = sh4r.pc;
        sh4r.ssr = sh4_read_sr();
        sh4r.sgr = sh4r.r[15];
        MMIO_WRITE(MMU,EXPEVT, code);
        sh4r.pc = sh4r.vbr + EXV_EXCEPTION;
        sh4r.new_pc = sh4r.pc + 2;
        sh4_write_sr( sh4r.ssr |SR_MD|SR_BL|SR_RB );
        sh4r.in_delay_slot = 0;
    }
}
/**
 * Raise a TRAPA exception: records the trap number (<<2, as the hardware
 * stores it) in TRA, saves PC/SR/R15 into SPC/SSR/SGR, and jumps to the
 * general exception vector with exceptions blocked.
 */
void FASTCALL sh4_raise_trap( int trap )
{
    MMIO_WRITE( MMU, TRA, trap<<2 );
    MMIO_WRITE( MMU, EXPEVT, EXC_TRAP );
    sh4r.spc = sh4r.pc;
    sh4r.ssr = sh4_read_sr();
    sh4r.sgr = sh4r.r[15];
    sh4r.pc = sh4r.vbr + EXV_EXCEPTION;
    sh4r.new_pc = sh4r.pc + 2;
    sh4_write_sr( sh4r.ssr |SR_MD|SR_BL|SR_RB );
    sh4r.in_delay_slot = 0;
}
/**
 * Raise a TLB miss exception for the given virtual page: records the
 * faulting address in TEA/PTEH and the code in EXPEVT, saves PC/SR/R15,
 * and jumps to the dedicated TLB-miss vector with exceptions blocked.
 */
void FASTCALL sh4_raise_tlb_exception( int code, sh4vma_t vpn )
{
    MMIO_WRITE( MMU, TEA, vpn );
    /* Keep the low 10 bits (ASID etc), replace the VPN field */
    MMIO_WRITE( MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)) );
    MMIO_WRITE( MMU, EXPEVT, code );
    sh4r.spc = sh4r.pc;
    sh4r.ssr = sh4_read_sr();
    sh4r.sgr = sh4r.r[15];
    sh4r.pc = sh4r.vbr + EXV_TLBMISS;
    sh4r.new_pc = sh4r.pc + 2;
    sh4_write_sr( sh4r.ssr |SR_MD|SR_BL|SR_RB );
    sh4r.in_delay_slot = 0;
}
/**
 * Accept the highest-priority pending interrupt from the INTC: records its
 * code in INTEVT, saves PC/SR/R15, and jumps to the interrupt vector
 * (VBR+0x600) with interrupts blocked.
 */
void FASTCALL sh4_accept_interrupt( void )
{
    uint32_t code = intc_accept_interrupt();
    MMIO_WRITE( MMU, INTEVT, code );
    sh4r.ssr = sh4_read_sr();
    sh4r.spc = sh4r.pc;
    sh4r.sgr = sh4r.r[15];
    sh4_write_sr( sh4r.ssr|SR_BL|SR_MD|SR_RB );
    sh4r.pc = sh4r.vbr + 0x600;
    sh4r.new_pc = sh4r.pc + 2;
}
436 void FASTCALL signsat48( void )
437 {
438 if( ((int64_t)sh4r.mac) < (int64_t)0xFFFF800000000000LL )
439 sh4r.mac = 0xFFFF800000000000LL;
440 else if( ((int64_t)sh4r.mac) > (int64_t)0x00007FFFFFFFFFFFLL )
441 sh4r.mac = 0x00007FFFFFFFFFFFLL;
442 }
444 void FASTCALL sh4_fsca( uint32_t anglei, float *fr )
445 {
446 float angle = (((float)(anglei&0xFFFF))/65536.0) * 2 * M_PI;
447 *fr++ = cosf(angle);
448 *fr = sinf(angle);
449 }
/**
 * Enter sleep mode (eg by executing a SLEEP instruction).
 * Sets sh4_state appropriately and ensures any stopping peripheral modules
 * are up to date, then exits the core with CORE_EXIT_SLEEP (does not return
 * to the caller within this slice).
 */
void FASTCALL sh4_sleep(void)
{
    /* STBCR.STBY selects standby (deep power-down) over plain sleep */
    if( MMIO_READ( CPG, STBCR ) & 0x80 ) {
        sh4r.sh4_state = SH4_STATE_STANDBY;
        /* Bring all running peripheral modules up to date, and then halt them. */
        TMU_run_slice( sh4r.slice_cycle );
        SCIF_run_slice( sh4r.slice_cycle );
        PMM_run_slice( sh4r.slice_cycle );
    } else {
        /* STBCR2.DSLP selects deep sleep over plain sleep */
        if( MMIO_READ( CPG, STBCR2 ) & 0x80 ) {
            sh4r.sh4_state = SH4_STATE_DEEP_SLEEP;
            /* Halt DMAC but other peripherals still running */
        } else {
            sh4r.sh4_state = SH4_STATE_SLEEP;
        }
    }
    sh4_core_exit( CORE_EXIT_SLEEP );
}
/**
 * Wakeup following sleep mode (IRQ or reset). Sets state back to running,
 * and restarts any peripheral devices that were stopped.
 *
 * NOTE(review): the per-state cases below are currently empty stubs — the
 * peripheral restart described above is not yet implemented here.
 */
void sh4_wakeup(void)
{
    switch( sh4r.sh4_state ) {
    case SH4_STATE_STANDBY:
        break;
    case SH4_STATE_DEEP_SLEEP:
        break;
    case SH4_STATE_SLEEP:
        break;
    }
    sh4r.sh4_state = SH4_STATE_RUNNING;
}
/**
 * Run a time slice (or portion of a timeslice) while the SH4 is sleeping.
 * Returns when either the SH4 wakes up (interrupt received) or the end of
 * the slice is reached. Updates sh4.slice_cycle with the exit time and
 * returns the same value.
 */
uint32_t sh4_sleep_run_slice( uint32_t nanosecs )
{
    int sleep_state = sh4r.sh4_state;
    assert( sleep_state != SH4_STATE_RUNNING );

    /* Fast-forward through queued events until one wakes us or the slice ends */
    while( sh4r.event_pending < nanosecs ) {
        sh4r.slice_cycle = sh4r.event_pending;
        if( sh4r.event_types & PENDING_EVENT ) {
            event_execute();
        }
        /* An interrupt ends the sleep; return at the wakeup time */
        if( sh4r.event_types & PENDING_IRQ ) {
            sh4_wakeup();
            return sh4r.slice_cycle;
        }
    }
    sh4r.slice_cycle = nanosecs;
    return sh4r.slice_cycle;
}
519 /**
520 * Compute the matrix tranform of fv given the matrix xf.
521 * Both fv and xf are word-swapped as per the sh4r.fr banks
522 */
523 void FASTCALL sh4_ftrv( float *target )
524 {
525 float fv[4] = { target[1], target[0], target[3], target[2] };
526 target[1] = sh4r.fr[1][1] * fv[0] + sh4r.fr[1][5]*fv[1] +
527 sh4r.fr[1][9]*fv[2] + sh4r.fr[1][13]*fv[3];
528 target[0] = sh4r.fr[1][0] * fv[0] + sh4r.fr[1][4]*fv[1] +
529 sh4r.fr[1][8]*fv[2] + sh4r.fr[1][12]*fv[3];
530 target[3] = sh4r.fr[1][3] * fv[0] + sh4r.fr[1][7]*fv[1] +
531 sh4r.fr[1][11]*fv[2] + sh4r.fr[1][15]*fv[3];
532 target[2] = sh4r.fr[1][2] * fv[0] + sh4r.fr[1][6]*fv[1] +
533 sh4r.fr[1][10]*fv[2] + sh4r.fr[1][14]*fv[3];
534 }
536 gboolean sh4_has_page( sh4vma_t vma )
537 {
538 sh4addr_t addr = mmu_vma_to_phys_disasm(vma);
539 return addr != MMU_VMA_ERROR && mem_has_page(addr);
540 }
.