/*
 * Repository viewer metadata (hgweb scrape residue), preserved as a comment:
 *   filename:  src/sh4/sh4core.h
 *   changeset: 1218:be02e87f9f87  (prev 1202:01ae5cbad4c8, next 1296:30ecee61f811)
 *   author:    nkeynes, Fri Mar 02 23:49:10 2012 +1000
 *   last change: Android WIP: rename gui_jni.c to gui_android.c; implement
 *   generic EGL driver with minimal Java wrapper; run emulation in a separate
 *   thread with a simple inter-thread queue; add menu items for start + reset.
 */
1 /**
2 * $Id$
3 *
4 * This file defines the internal functions used by the SH4 core,
5 *
6 * Copyright (c) 2005-2008 Nathan Keynes.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
19 #ifndef lxdream_sh4core_H
20 #define lxdream_sh4core_H 1
22 #include <glib/gtypes.h>
23 #include <stdint.h>
24 #include <stdio.h>
25 #include "mem.h"
26 #include "sh4/sh4.h"
28 #ifdef __cplusplus
29 extern "C" {
30 #endif
/* Breakpoint data structure */
extern struct breakpoint_struct sh4_breakpoints[MAX_BREAKPOINTS]; /* Fixed-capacity breakpoint table (struct defined elsewhere) */
extern int sh4_breakpoint_count;    /* Number of entries of sh4_breakpoints currently in use */
extern gboolean sh4_starting;       /* NOTE(review): presumably set while the core is starting a slice — confirm at definition site */
extern gboolean sh4_profile_blocks; /* NOTE(review): presumably enables per-block profiling in the translator — confirm at definition site */
/**
 * Cached direct pointer to the current instruction page. If AT is on, this
 * is derived from the ITLB, otherwise this will be the entire memory region.
 * This is actually a fairly useful optimization, as we can make a lot of
 * assumptions about the "current page" that we can't make in general for
 * arbitrary virtual addresses.
 *
 * Invariant (used by the IS_IN_ICACHE/GET_ICACHE_* macros below): page_vma
 * is the masked base VMA of the page, so (vma & mask) == page_vma exactly
 * when vma falls inside the cached page.
 */
struct sh4_icache_struct {
    sh4ptr_t page; // Host pointer to the start of the page (NULL if no page)
    sh4vma_t page_vma; // virtual address of the page.
    sh4addr_t page_ppa; // physical address of the page
    uint32_t mask; // page mask: high bits select the page, low bits are the in-page offset
};
/* Single global icache entry shared by the interpreter/translator cores. */
extern struct sh4_icache_struct sh4_icache;
/**
 * Test if a given address is contained in the current icache entry.
 * Relies on page_vma being pre-masked (see sh4_icache_struct).
 */
#define IS_IN_ICACHE(addr) (sh4_icache.page_vma == ((addr) & sh4_icache.mask))
/**
 * Return a (host) pointer for the given vma, under the assumption that it is
 * actually contained in the current icache entry. Caller must check
 * IS_IN_ICACHE first — no validation is performed here.
 */
#define GET_ICACHE_PTR(addr) (sh4_icache.page + ((addr)-sh4_icache.page_vma))
/**
 * Return the physical (external) address for the given vma, assuming that it is
 * actually contained in the current icache entry.
 */
#define GET_ICACHE_PHYS(addr) (sh4_icache.page_ppa + ((addr)-sh4_icache.page_vma))

/**
 * Return the virtual (vma) address for the first address past the end of the
 * cache entry. Assumes that there is in fact a current icache entry.
 * (~mask is the in-page offset mask, so ~mask + 1 is the page size.)
 */
#define GET_ICACHE_END() (sh4_icache.page_vma + (~sh4_icache.mask) + 1)
/*
 * Core exit codes, passed to/returned through sh4_core_exit(). Kept as
 * #defines rather than an enum since translated code may embed them as
 * immediates.
 */

/**
 * SH4 vm-exit flag - exit the current block but continue normally
 */
#define CORE_EXIT_CONTINUE 1

/**
 * SH4 vm-exit flag - exit the current block and halt immediately (eg fatal error)
 */
#define CORE_EXIT_HALT 2

/**
 * SH4 vm-exit flag - exit the current block and halt immediately for a system
 * breakpoint.
 */
#define CORE_EXIT_BREAKPOINT 3

/**
 * SH4 vm-exit flag - exit the current block and continue after performing a full
 * system reset (dreamcast_reset())
 */
#define CORE_EXIT_SYSRESET 4

/**
 * SH4 vm-exit flag - exit the current block and continue after the next IRQ.
 */
#define CORE_EXIT_SLEEP 5

/**
 * SH4 vm-exit flag - exit the current block and flush all instruction caches (ie
 * if address translation has changed)
 */
#define CORE_EXIT_FLUSH_ICACHE 6

/**
 * SH4 vm-exit flag - exit the current block following a taken exception. sh4r.spc
 * is fixed up by recovery rather than sh4r.pc.
 */
#define CORE_EXIT_EXCEPTION 7
/* Signature shared by all run-slice implementations below: takes a slice
 * length in nanoseconds, returns the number of nanoseconds actually run. */
typedef uint32_t (*sh4_run_slice_fn)(uint32_t);

/* SH4 module functions */
void sh4_init( void );
void sh4_reset( void );
void sh4_run( void );
void sh4_stop( void );
uint32_t sh4_run_slice( uint32_t nanos ); // Run single timeslice using emulator
uint32_t sh4_xlat_run_slice( uint32_t nanos ); // Run single timeslice using translator
uint32_t sh4_sleep_run_slice( uint32_t nanos ); // Run single timeslice while the CPU is asleep

/**
 * Immediately exit from the currently executing instruction with the given
 * exit code (one of the CORE_EXIT_* flags). This method does not return.
 */
void sh4_core_exit( int exit_code );
/**
 * Exit the current block at the end of the current instruction, flush the
 * translation cache (completely) and return control to sh4_xlat_run_slice.
 *
 * As a special case, if the current instruction is actually the last
 * instruction in the block (ie it's in a delay slot), this function
 * returns to allow normal completion of the translation block. Otherwise
 * this function never returns.
 *
 * Must only be invoked (indirectly) from within translated code.
 */
void sh4_flush_icache( void ); /* (void) added: empty parens declare no prototype in C89/C99 */
/* SH4 peripheral module functions.
 * Convention: *_reset reinitializes the module, *_run_slice advances it by
 * the given number of nanoseconds, *_save_state/*_load_state serialize to or
 * from the given FILE (load returns 0 on success, nonzero on failure —
 * NOTE(review): inferred from int return; confirm against implementations). */
void CPG_reset( void );
void DMAC_reset( void );
void DMAC_run_slice( uint32_t );
void DMAC_save_state( FILE * );
int DMAC_load_state( FILE * );
void INTC_reset( void );
void INTC_save_state( FILE *f );
int INTC_load_state( FILE *f );
void MMU_init( void );
void MMU_reset( void );
void MMU_save_state( FILE *f );
int MMU_load_state( FILE *f );
void MMU_ldtlb( void );             /* (void) added: empty parens declare no prototype */
void CCN_reset( void );             /* (void) added: empty parens declare no prototype */
void CCN_set_cache_control( int reg );
void CCN_save_state( FILE *f );
int CCN_load_state( FILE *f );
void SCIF_reset( void );
void SCIF_run_slice( uint32_t );
void SCIF_save_state( FILE *f );
int SCIF_load_state( FILE *f );
void SCIF_update_line_speed(void);
void TMU_init( void );
void TMU_reset( void );
void TMU_run_slice( uint32_t );
void TMU_save_state( FILE * );
int TMU_load_state( FILE * );
void TMU_update_clocks( void );
void PMM_reset( void );
void PMM_write_control( int, uint32_t );
void PMM_save_state( FILE * );
int PMM_load_state( FILE * );
uint32_t PMM_run_slice( uint32_t );
uint32_t sh4_translate_run_slice(uint32_t);
uint32_t sh4_emulate_run_slice(uint32_t);
/* SH4 instruction support methods */
mem_region_fn_t FASTCALL sh7750_decode_address( sh4addr_t address );
void FASTCALL sh7750_decode_address_copy( sh4addr_t address, mem_region_fn_t result );
void FASTCALL sh4_sleep( void );
/* Helper for the SH4 FSCA instruction (sine/cosine approximation);
 * writes results through fr. */
void FASTCALL sh4_fsca( uint32_t angle, float *fr );
/* Helper for the SH4 FTRV instruction (vector transform); fv is
 * transformed in place — NOTE(review): in-place assumed, confirm. */
void FASTCALL sh4_ftrv( float *fv );
uint32_t FASTCALL sh4_read_sr(void);   /* Compose the full SR value from sh4r state */
void FASTCALL sh4_write_sr(uint32_t val);
void FASTCALL sh4_write_fpscr(uint32_t val);
void FASTCALL sh4_switch_fr_banks(void); /* Swap FR0-15 with XF0-15 (FPSCR.FR toggle) */
void FASTCALL signsat48(void);           /* 48-bit sign saturation for MAC (SR_S mode) */
gboolean sh4_has_page( sh4vma_t vma );
/* SH4 Memory */
/* Sentinel returned for untranslatable addresses (top bit set). */
#define MMU_VMA_ERROR 0x80000000
/**
 * Update the sh4_icache structure to contain the specified vma. If the vma
 * cannot be resolved, an MMU exception is raised and the function returns
 * FALSE. Otherwise, returns TRUE and updates sh4_icache accordingly.
 * Note: If the vma resolves to a non-memory area, sh4_icache will be
 * invalidated, but the function will still return TRUE.
 * @return FALSE if an MMU exception was raised, otherwise TRUE.
 */
gboolean FASTCALL mmu_update_icache( sh4vma_t addr );

/* Guest memory accessors. Reads return signed values widened to 32/64 bits;
 * writes take the value in the low bits of the given word. */
int64_t FASTCALL sh4_read_quad( sh4addr_t addr );
int32_t FASTCALL sh4_read_long( sh4addr_t addr );
int32_t FASTCALL sh4_read_word( sh4addr_t addr );
int32_t FASTCALL sh4_read_byte( sh4addr_t addr );
void FASTCALL sh4_write_quad( sh4addr_t addr, uint64_t val );
void FASTCALL sh4_write_long( sh4addr_t addr, uint32_t val );
void FASTCALL sh4_write_word( sh4addr_t addr, uint32_t val );
void FASTCALL sh4_write_byte( sh4addr_t addr, uint32_t val );
int32_t sh4_read_phys_word( sh4addr_t addr ); /* Bypasses address translation */
void FASTCALL sh4_flush_store_queue( sh4addr_t addr );
/* MMU-aware variant; exc is an exception-return context — see SH4_EXCEPTION_EXIT. */
void FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr, void *exc );
/* SH4 Exceptions - event codes as defined by the SH7750 EXPEVT register.
 * (Values are listed in functional groups, not numeric order.) */
#define EXC_POWER_RESET     0x000 /* reset vector */
#define EXC_MANUAL_RESET    0x020 /* reset vector */
#define EXC_TLB_MISS_READ   0x040 /* TLB vector */
#define EXC_TLB_MISS_WRITE  0x060 /* TLB vector */
#define EXC_INIT_PAGE_WRITE 0x080 /* Write to page with D (dirty) bit clear */
#define EXC_TLB_PROT_READ   0x0A0 /* Protection violation on read */
#define EXC_TLB_PROT_WRITE  0x0C0 /* Protection violation on write */
#define EXC_DATA_ADDR_READ  0x0E0 /* Misaligned/invalid data address on read */
#define EXC_DATA_ADDR_WRITE 0x100 /* Misaligned/invalid data address on write */
#define EXC_TLB_MULTI_HIT   0x140 /* Multiple TLB entries match */
#define EXC_SLOT_ILLEGAL    0x1A0 /* Illegal instruction in a delay slot */
#define EXC_ILLEGAL         0x180 /* General illegal instruction */
#define EXC_TRAP            0x160 /* TRAPA instruction */
#define EXC_FPU_DISABLED    0x800 /* FPU access with SR.FD set */
#define EXC_SLOT_FPU_DISABLED 0x820 /* FPU access in delay slot with SR.FD set */

/* Exception vector offsets (added to VBR when dispatching). */
#define EXV_EXCEPTION    0x100  /* General exception vector */
#define EXV_TLBMISS      0x400  /* TLB-miss exception vector */
#define EXV_INTERRUPT    0x600  /* External interrupt vector */
/* Exception-raising entry points. The int argument is an EXC_* event code. */
void FASTCALL sh4_raise_exception( int );
void FASTCALL sh4_raise_reset( int );
void FASTCALL sh4_raise_trap( int );
void FASTCALL sh4_raise_tlb_exception( int, sh4vma_t );
void FASTCALL sh4_raise_tlb_multihit( sh4vma_t );
void FASTCALL sh4_accept_interrupt( void );

#define RAISE_TLB_ERROR(code, vpn) sh4_raise_tlb_exception(code, vpn)
/* Record the faulting address in TEA/PTEH, then raise the exception.
 * WARNING: multi-statement macro NOT wrapped in do{}while(0) — do not use
 * in an unbraced if/else body. (Left as-is: it already ends in ';' so
 * existing call sites may rely on the current expansion.) */
#define RAISE_MEM_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code);
#define RAISE_TLB_MULTIHIT_ERROR(vpn) sh4_raise_tlb_multihit(vpn)

/* Return to the exception handler context. The HAVE_FRAME_ADDRESS variant
 * overwrites this frame's return address with the local 'exc' pointer, which
 * must be in scope at the expansion site (GCC-specific stack-frame hack);
 * otherwise fall back to a full core exit. */
#ifdef HAVE_FRAME_ADDRESS
#define SH4_EXCEPTION_EXIT() do{ *(((void * volatile *)__builtin_frame_address(0))+1) = exc; } while(0)
#else
#define SH4_EXCEPTION_EXIT() sh4_core_exit(CORE_EXIT_EXCEPTION)
#endif
/**
 * Helper method to update the SH4 registers for an exception, without
 * touching the MMU registers. Mainly for use in shadow mode.
 */
void FASTCALL sh4_reraise_exception( sh4addr_t exception_pc );

/**
 * Complete the current instruction as part of a core exit. Prevents the
 * system from being left in an inconsistent state when an exit is
 * triggered during a memory write.
 */
void sh4_finalize_instruction( void );
/* Status Register (SR) bits */
#define SR_MD 0x40000000 /* Processor mode ( User=0, Privileged=1 ) */
#define SR_RB 0x20000000 /* Register bank (privileged mode only) */
#define SR_BL 0x10000000 /* Exception/interrupt block (1 = masked) */
#define SR_FD 0x00008000 /* FPU disable */
#define SR_M 0x00000200  /* M flag (divide-step state, DIV0S/DIV1) */
#define SR_Q 0x00000100  /* Q flag (divide-step state, DIV0S/DIV1) */
#define SR_IMASK 0x000000F0 /* Interrupt mask level */
#define SR_S 0x00000002 /* Saturation operation for MAC instructions */
#define SR_T 0x00000001 /* True/false or carry/borrow */
#define SR_MASK 0x700083F3 /* All architecturally defined SR bits */
#define SR_MQSTMASK 0xFFFFFCFC /* Mask to clear the flags we're keeping separately */
#define SR_MDRB 0x60000000 /* MD+RB mask for convenience */

#define IS_SH4_PRIVMODE() (sh4r.sr&SR_MD)
#define SH4_INTMASK() ((sh4r.sr&SR_IMASK)>>4)
/* An event is due this slice and we're not mid delay-slot (delay slots must
 * complete atomically with their branch). */
#define SH4_EVENT_PENDING() (sh4r.event_pending <= sh4r.slice_cycle && !sh4r.in_delay_slot)
/* Floating-point Status/Control Register (FPSCR) bits */
#define FPSCR_FR 0x00200000 /* FPU register bank */
#define FPSCR_SZ 0x00100000 /* FPU transfer size (0=32 bits, 1=64 bits) */
#define FPSCR_PR 0x00080000 /* Precision (0=32 bits, 1=64 bits) */
#define FPSCR_DN 0x00040000 /* Denormalization mode (1 = treat as 0) */
#define FPSCR_CAUSE 0x0003F000  /* FPU exception cause field */
#define FPSCR_ENABLE 0x00000F80 /* FPU exception enable field */
#define FPSCR_FLAG 0x0000007C   /* FPU exception flag field */
#define FPSCR_RM 0x00000003 /* Rounding mode (0=nearest, 1=to zero) */
#define FPSCR_MASK 0x003FFFFF /* All architecturally defined FPSCR bits */

#define IS_FPU_DOUBLEPREC() (sh4r.fpscr&FPSCR_PR)
#define IS_FPU_DOUBLESIZE() (sh4r.fpscr&FPSCR_SZ)
#define IS_FPU_ENABLED() ((sh4r.sr&SR_FD)==0)
/* FP register accessors over sh4r.fr[bank][slot]. Single registers use ^1
 * because 32-bit halves are stored word-swapped within each 64-bit pair;
 * doubles use <<1 to index the even slot of the pair.
 * NOTE(review): the double accessors type-pun float storage through a
 * double* — relies on compiler tolerance of strict-aliasing violations. */
#define FR(x) sh4r.fr[0][(x)^1]                        /* FRn (bank 0, single) */
#define DRF(x) *((double *)&sh4r.fr[0][(x)<<1])        /* DRn (bank 0, double) */
#define XF(x) sh4r.fr[1][(x)^1]                        /* XFn (bank 1, single) */
#define XDR(x) *((double *)&sh4r.fr[1][(x)<<1])        /* XDn (bank 1, double) */
#define DRb(x,b) *((double *)&sh4r.fr[(b)][(x)<<1])    /* DRn in explicit bank b */
/* Fixed: argument now parenthesized ((x)&1, (x)&0x0E) like the sibling
 * macros above — the unparenthesized form miscomputes for operator
 * arguments with precedence below '&'. Low bit selects the bank, remaining
 * bits the register pair. */
#define DR(x) *((double *)&sh4r.fr[(x)&1][(x)&0x0E])
#define FPULf   (sh4r.fpul.f) /* FPUL viewed as float */
#define FPULi    (sh4r.fpul.i) /* FPUL viewed as uint32 */
/**************** SH4 internal memory regions *****************/
/* Memory-mapped P4 control regions: address-array and data-array views of
 * the ITLB, UTLB, instruction cache and operand cache. */
extern struct mem_region_fn p4_region_itlb_addr;
extern struct mem_region_fn p4_region_itlb_data;
extern struct mem_region_fn p4_region_utlb_addr;
extern struct mem_region_fn p4_region_utlb_data;
extern struct mem_region_fn p4_region_icache_addr;
extern struct mem_region_fn p4_region_icache_data;
extern struct mem_region_fn p4_region_ocache_addr;
extern struct mem_region_fn p4_region_ocache_data;

/* Compile-time switch: operand-cache emulation enabled. */
#define OC_ENABLED 1
324 #ifdef __cplusplus
325 }
326 #endif
328 #endif /* !lxdream_sh4core_H */