filename | src/sh4/sh4trans.c |
changeset | 619:0800a0137472 |
prev | 617:476a717a54f3 |
next | 669:ab344e42bca9 |
author | nkeynes |
date | Fri Feb 08 00:06:56 2008 +0000 (16 years ago) |
permissions | -rw-r--r-- |
last change | Fix LDS/STS to FPUL/FPSCR to check the FPU disabled bit. Fixes the linux 2.4.0-test8 kernel boot (this wasn't exactly very well documented in the original manual) |
view | annotate | diff | log | raw |
1 /**
2 * $Id$
3 *
4 * SH4 translation core module. This part handles the non-target-specific
5 * section of the translation.
6 *
7 * Copyright (c) 2005 Nathan Keynes.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19 #include <assert.h>
20 #include <setjmp.h>
21 #include "eventq.h"
22 #include "syscall.h"
23 #include "clock.h"
24 #include "sh4/sh4core.h"
25 #include "sh4/sh4trans.h"
26 #include "sh4/xltcache.h"
/* Jump target used to bail out of translated code back into sh4_xlat_run_slice
 * (see sh4_translate_exit / sh4_translate_unwind_stack). */
static jmp_buf xlat_jmp_buf;
/* TRUE while the translate/execute loop in sh4_xlat_run_slice is active. */
static gboolean xlat_running = FALSE;
32 gboolean sh4_xlat_is_running()
33 {
34 return xlat_running;
35 }
37 /**
38 * Execute a timeslice using translated code only (ie translate/execute loop)
39 */
40 uint32_t sh4_xlat_run_slice( uint32_t nanosecs )
41 {
42 sh4r.slice_cycle = 0;
44 if( sh4r.sh4_state != SH4_STATE_RUNNING ) {
45 sh4_sleep_run_slice(nanosecs);
46 }
48 switch( setjmp(xlat_jmp_buf) ) {
49 case XLAT_EXIT_BREAKPOINT:
50 sh4_clear_breakpoint( sh4r.pc, BREAK_ONESHOT );
51 /* fallthrough */
52 case XLAT_EXIT_HALT:
53 if( sh4r.sh4_state != SH4_STATE_STANDBY ) {
54 TMU_run_slice( sh4r.slice_cycle );
55 SCIF_run_slice( sh4r.slice_cycle );
56 dreamcast_stop();
57 return sh4r.slice_cycle;
58 }
59 case XLAT_EXIT_SYSRESET:
60 dreamcast_reset();
61 break;
62 case XLAT_EXIT_SLEEP:
63 sh4_sleep_run_slice(nanosecs);
64 break;
65 }
67 xlat_running = TRUE;
68 void * (*code)() = NULL;
69 while( sh4r.slice_cycle < nanosecs ) {
70 if( sh4r.event_pending <= sh4r.slice_cycle ) {
71 if( sh4r.event_types & PENDING_EVENT ) {
72 event_execute();
73 }
74 /* Eventq execute may (quite likely) deliver an immediate IRQ */
75 if( sh4r.event_types & PENDING_IRQ ) {
76 sh4_accept_interrupt();
77 code = NULL;
78 }
79 }
81 if( code == NULL ) {
82 if( sh4r.pc > 0xFFFFFF00 ) {
83 syscall_invoke( sh4r.pc );
84 sh4r.in_delay_slot = 0;
85 sh4r.pc = sh4r.pr;
86 }
88 code = xlat_get_code_by_vma( sh4r.pc );
89 if( code == NULL ) {
90 code = sh4_translate_basic_block( sh4r.pc );
91 }
92 }
93 uint32_t oldpc = sh4r.pc;
94 code = code();
95 }
97 xlat_running = FALSE;
98 sh4_starting = FALSE;
99 sh4r.slice_cycle = nanosecs;
100 if( sh4r.sh4_state != SH4_STATE_STANDBY ) {
101 TMU_run_slice( nanosecs );
102 SCIF_run_slice( nanosecs );
103 }
104 return nanosecs;
105 }
/* Current write position within the translation output buffer. */
uint8_t *xlat_output;
/* Cache block currently being filled by sh4_translate_basic_block. */
xlat_cache_block_t xlat_current_block;
/* Recovery records accumulated for the block under translation; copied onto
 * the end of the code block at commit time. */
struct xlat_recovery_record xlat_recovery[MAX_RECOVERY_SIZE];
/* Number of entries currently used in xlat_recovery. */
uint32_t xlat_recovery_posn;
112 void sh4_translate_add_recovery( uint32_t icount )
113 {
114 xlat_recovery[xlat_recovery_posn].xlat_offset =
115 ((uintptr_t)xlat_output) - ((uintptr_t)xlat_current_block->code);
116 xlat_recovery[xlat_recovery_posn].sh4_icount = icount;
117 xlat_recovery_posn++;
118 }
/**
 * Translate a linear basic block, ie all instructions from the start address
 * (inclusive) until the next branch/jump instruction or the end of the page
 * is reached.
 * @param start virtual address of the first instruction of the block.
 * @return the address of the translated code block.
 */
void * sh4_translate_basic_block( sh4addr_t start )
{
    sh4addr_t pc = start;
    /* Never translate past the end of the current 4K page */
    sh4addr_t lastpc = (pc&0xFFFFF000)+0x1000;
    int done, i;
    xlat_current_block = xlat_start_block( start );
    xlat_output = (uint8_t *)xlat_current_block->code;
    xlat_recovery_posn = 0;
    /* eob = end of the currently allocated output buffer */
    uint8_t *eob = xlat_output + xlat_current_block->size;

    /* Clamp further to the end of the region currently mapped in the icache */
    if( GET_ICACHE_END() < lastpc ) {
        lastpc = GET_ICACHE_END();
    }

    sh4_translate_begin_block(pc);

    do {
        /* check for breakpoints at this pc */
        for( i=0; i<sh4_breakpoint_count; i++ ) {
            if( sh4_breakpoints[i].address == pc ) {
                sh4_translate_emit_breakpoint(pc);
                break;
            }
        }
        /* Grow the output buffer if the worst-case instruction may not fit.
         * Note xlat_extend_block may relocate the block, so the output
         * pointer and end-of-buffer are rebased against the new code base. */
        if( eob - xlat_output < MAX_INSTRUCTION_SIZE ) {
            uint8_t *oldstart = xlat_current_block->code;
            xlat_current_block = xlat_extend_block( xlat_output - oldstart + MAX_INSTRUCTION_SIZE );
            xlat_output = xlat_current_block->code + (xlat_output - oldstart);
            eob = xlat_current_block->code + xlat_current_block->size;
        }
        done = sh4_translate_instruction( pc );
        assert( xlat_output <= eob );
        pc += 2;
        /* Force termination at the page/icache boundary */
        if ( pc >= lastpc ) {
            done = 2;
        }
    } while( !done );
    /* done appears to encode extra bytes consumed past pc (2 == none, eg a
     * delay slot) - TODO confirm against sh4_translate_instruction */
    pc += (done - 2);

    // Add end-of-block recovery for post-instruction checks
    sh4_translate_add_recovery( (pc - start)>>1 );

    /* Make sure there is room for the epilogue plus the recovery table */
    int epilogue_size = sh4_translate_end_block_size();
    uint32_t recovery_size = sizeof(struct xlat_recovery_record)*xlat_recovery_posn;
    uint32_t finalsize = xlat_output - xlat_current_block->code + epilogue_size + recovery_size;
    if( eob - xlat_output < finalsize ) {
        uint8_t *oldstart = xlat_current_block->code;
        xlat_current_block = xlat_extend_block( finalsize );
        xlat_output = xlat_current_block->code + (xlat_output - oldstart);
    }
    sh4_translate_end_block(pc);

    /* Write the recovery records onto the end of the code block */
    memcpy( xlat_output, xlat_recovery, recovery_size);
    xlat_current_block->recover_table_offset = xlat_output - (uint8_t *)xlat_current_block->code;
    xlat_current_block->recover_table_size = xlat_recovery_posn;
    xlat_commit_block( finalsize, pc-start );
    return xlat_current_block->code;
}
187 /**
188 * "Execute" the supplied recovery record. Currently this only updates
189 * sh4r.pc and sh4r.slice_cycle according to the currently executing
190 * instruction. In future this may be more sophisticated (ie will
191 * call into generated code).
192 */
193 void sh4_translate_run_recovery( xlat_recovery_record_t recovery )
194 {
195 sh4r.slice_cycle += (recovery->sh4_icount * sh4_cpu_period);
196 sh4r.pc += (recovery->sh4_icount<<1);
197 }
199 void sh4_translate_unwind_stack( gboolean abort_after, unwind_thunk_t thunk )
200 {
201 void *pc = xlat_get_native_pc();
203 assert( pc != NULL );
204 void *code = xlat_get_code( sh4r.pc );
205 xlat_recovery_record_t recover = xlat_get_recovery(code, pc, TRUE);
206 if( recover != NULL ) {
207 // Can be null if there is no recovery necessary
208 sh4_translate_run_recovery(recover);
209 }
210 if( thunk != NULL ) {
211 thunk();
212 }
213 // finally longjmp back into sh4_xlat_run_slice
214 xlat_running = FALSE;
215 longjmp(xlat_jmp_buf, XLAT_EXIT_CONTINUE);
216 }
218 void sh4_translate_exit( int exit_code )
219 {
220 void *pc = xlat_get_native_pc();
221 if( pc != NULL ) {
222 // could be null if we're not actually running inside the translator
223 void *code = xlat_get_code( sh4r.pc );
224 xlat_recovery_record_t recover = xlat_get_recovery(code, pc, TRUE);
225 if( recover != NULL ) {
226 // Can be null if there is no recovery necessary
227 sh4_translate_run_recovery(recover);
228 }
229 }
230 // finally longjmp back into sh4_xlat_run_slice
231 xlat_running = FALSE;
232 longjmp(xlat_jmp_buf, exit_code);
233 }
235 void sh4_translate_breakpoint_hit(uint32_t pc)
236 {
237 if( sh4_starting && sh4r.slice_cycle == 0 && pc == sh4r.pc ) {
238 return;
239 }
240 sh4_translate_exit( XLAT_EXIT_BREAKPOINT );
241 }
243 /**
244 * Exit the current block at the end of the current instruction, flush the
245 * translation cache (completely) and return control to sh4_xlat_run_slice.
246 *
247 * As a special case, if the current instruction is actually the last
248 * instruction in the block (ie it's in a delay slot), this function
249 * returns to allow normal completion of the translation block. Otherwise
250 * this function never returns.
251 *
252 * Must only be invoked (indirectly) from within translated code.
253 */
254 void sh4_translate_flush_cache()
255 {
256 void *pc = xlat_get_native_pc();
257 assert( pc != NULL );
259 void *code = xlat_get_code( sh4r.pc );
260 xlat_recovery_record_t recover = xlat_get_recovery(code, pc, TRUE);
261 if( recover != NULL ) {
262 // Can be null if there is no recovery necessary
263 sh4_translate_run_recovery(recover);
264 xlat_flush_cache();
265 xlat_running = FALSE;
266 longjmp(xlat_jmp_buf, XLAT_EXIT_CONTINUE);
267 } else {
268 xlat_flush_cache();
269 return;
270 }
271 }
273 void *xlat_get_code_by_vma( sh4vma_t vma )
274 {
275 void *result = NULL;
277 if( IS_IN_ICACHE(vma) ) {
278 return xlat_get_code( GET_ICACHE_PHYS(vma) );
279 }
281 if( vma > 0xFFFFFF00 ) {
282 // lxdream hook
283 return NULL;
284 }
286 if( !mmu_update_icache(vma) ) {
287 // fault - off to the fault handler
288 if( !mmu_update_icache(sh4r.pc) ) {
289 // double fault - halt
290 ERROR( "Double fault - halting" );
291 dreamcast_stop();
292 return NULL;
293 }
294 }
296 assert( IS_IN_ICACHE(sh4r.pc) );
297 result = xlat_get_code( GET_ICACHE_PHYS(sh4r.pc) );
298 return result;
299 }
.