4 * SH4 translation core module. This part handles the non-target-specific
5 * section of the translation.
7 * Copyright (c) 2005 Nathan Keynes.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
24 #include "sh4/sh4core.h"
25 #include "sh4/sh4trans.h"
26 #include "sh4/xltcache.h"
/* Non-local exit state: translated code longjmps back into
 * sh4_xlat_run_slice through xlat_jmp_buf (see sh4_translate_exit and
 * friends). xlat_running tracks whether the run loop is active. */
static jmp_buf xlat_jmp_buf;
static gboolean xlat_running = FALSE;
32 gboolean sh4_xlat_is_running()
 * Execute a timeslice using translated code only (ie translate/execute loop)
uint32_t sh4_xlat_run_slice( uint32_t nanosecs )
    /* If the CPU is sleeping, wake it only when a pending event falls within
     * this slice; otherwise the core stays in its current sleep state. */
    if( sh4r.sh4_state != SH4_STATE_RUNNING ) {
        if( sh4r.event_pending < nanosecs ) {
            sh4r.sh4_state = SH4_STATE_RUNNING;
            sh4r.slice_cycle = sh4r.event_pending;
    /* Re-entry point: translated code exits the slice by longjmping here
     * with one of the XLAT_EXIT_* codes (see sh4_translate_exit). */
    switch( setjmp(xlat_jmp_buf) ) {
    case XLAT_EXIT_BREAKPOINT:
        /* One-shot breakpoints are cleared as soon as they fire. */
        sh4_clear_breakpoint( sh4r.pc, BREAK_ONESHOT );
        /* Peripheral modules are only clocked while the CPU is not in standby. */
        if( sh4r.sh4_state != SH4_STATE_STANDBY ) {
            TMU_run_slice( sh4r.slice_cycle );
            SCIF_run_slice( sh4r.slice_cycle );
        return sh4r.slice_cycle;
    case XLAT_EXIT_SYSRESET:
    void * (*code)() = NULL;
    /* Main translate/execute loop: deliver any due events/IRQs, then look up
     * (translating on miss) and execute the block at the current PC. */
    while( sh4r.slice_cycle < nanosecs ) {
        if( sh4r.event_pending <= sh4r.slice_cycle ) {
            if( sh4r.event_types & PENDING_EVENT ) {
            /* Eventq execute may (quite likely) deliver an immediate IRQ */
            if( sh4r.event_types & PENDING_IRQ ) {
                sh4_accept_interrupt();
        /* PCs above 0xFFFFFF00 are emulator syscall hooks rather than
         * translatable guest code. */
        if( sh4r.pc > 0xFFFFFF00 ) {
            syscall_invoke( sh4r.pc );
            sh4r.in_delay_slot = 0;
        code = xlat_get_code_by_vma( sh4r.pc );
            code = sh4_translate_basic_block( sh4r.pc );
        uint32_t oldpc = sh4r.pc;
    /* Slice complete: bring the peripherals up to the full slice length. */
    if( sh4r.sh4_state != SH4_STATE_STANDBY ) {
        TMU_run_slice( nanosecs );
        SCIF_run_slice( nanosecs );
/* Current write position within the code block under translation. */
uint8_t *xlat_output;
/* Cache block currently being generated by sh4_translate_basic_block. */
xlat_cache_block_t xlat_current_block;
/* Recovery records accumulated for the current block; copied onto the end
 * of the code block when the block is committed. */
struct xlat_recovery_record xlat_recovery[MAX_RECOVERY_SIZE];
/* Number of entries currently used in xlat_recovery[]. */
uint32_t xlat_recovery_posn;
112 void sh4_translate_add_recovery( uint32_t icount )
114 xlat_recovery[xlat_recovery_posn].xlat_offset =
115 ((uintptr_t)xlat_output) - ((uintptr_t)xlat_current_block->code);
116 xlat_recovery[xlat_recovery_posn].sh4_icount = icount;
117 xlat_recovery_posn++;
 * Translate a linear basic block, ie all instructions from the start address
 * (inclusive) until the next branch/jump instruction or the end of the page
 * @return the address of the translated block
 * eg due to lack of buffer space.
void * sh4_translate_basic_block( sh4addr_t start )
    sh4addr_t pc = start;
    /* By default stop translating at the end of the 4K page containing start. */
    sh4addr_t lastpc = (pc&0xFFFFF000)+0x1000;
    xlat_current_block = xlat_start_block( start );
    xlat_output = (uint8_t *)xlat_current_block->code;
    xlat_recovery_posn = 0;
    /* eob = first byte past the end of the allocated code block. */
    uint8_t *eob = xlat_output + xlat_current_block->size;
    /* Never translate past the end of the icache region. */
    if( GET_ICACHE_END() < lastpc ) {
        lastpc = GET_ICACHE_END();
    sh4_translate_begin_block(pc);
        /* check for breakpoints at this pc */
        for( i=0; i<sh4_breakpoint_count; i++ ) {
            if( sh4_breakpoints[i].address == pc ) {
                sh4_translate_emit_breakpoint(pc);
        /* Grow the block if there may not be room for one more instruction. */
        if( eob - xlat_output < MAX_INSTRUCTION_SIZE ) {
            uint8_t *oldstart = xlat_current_block->code;
            xlat_current_block = xlat_extend_block( xlat_output - oldstart + MAX_INSTRUCTION_SIZE );
            /* Rebase the output/eob pointers — the block may have moved. */
            xlat_output = xlat_current_block->code + (xlat_output - oldstart);
            eob = xlat_current_block->code + xlat_current_block->size;
        done = sh4_translate_instruction( pc );
        assert( xlat_output <= eob );
        if ( pc >= lastpc ) {
    /* Ensure there is room for the epilogue plus the recovery table before
     * emitting them. */
    int epilogue_size = sh4_translate_end_block_size();
    uint32_t recovery_size = sizeof(struct xlat_recovery_record)*xlat_recovery_posn;
    uint32_t finalsize = xlat_output - xlat_current_block->code + epilogue_size + recovery_size;
    if( eob - xlat_output < finalsize ) {
        uint8_t *oldstart = xlat_current_block->code;
        xlat_current_block = xlat_extend_block( finalsize );
        xlat_output = xlat_current_block->code + (xlat_output - oldstart);
    sh4_translate_end_block(pc);
    /* Write the recovery records onto the end of the code block */
    memcpy( xlat_output, xlat_recovery, recovery_size);
    xlat_current_block->recover_table_offset = xlat_output - (uint8_t *)xlat_current_block->code;
    xlat_current_block->recover_table_size = xlat_recovery_posn;
    /* Commit with the guest byte length (pc-start) for cache invalidation. */
    xlat_commit_block( finalsize, pc-start );
    return xlat_current_block->code;
184 * "Execute" the supplied recovery record. Currently this only updates
185 * sh4r.pc and sh4r.slice_cycle according to the currently executing
186 * instruction. In future this may be more sophisticated (ie will
187 * call into generated code).
189 void sh4_translate_run_recovery( xlat_recovery_record_t recovery )
191 sh4r.slice_cycle += (recovery->sh4_icount * sh4_cpu_period);
192 sh4r.pc += (recovery->sh4_icount<<1);
/**
 * Unwind out of translated code: apply any pending recovery record for the
 * current native PC, invoke the supplied thunk (if any), then longjmp back
 * into sh4_xlat_run_slice. Does not return to the caller.
 */
void sh4_translate_unwind_stack( gboolean abort_after, unwind_thunk_t thunk )
    /* Host-code PC we were executing inside the translated block. */
    void *pc = xlat_get_native_pc();
    assert( pc != NULL );
    void *code = xlat_get_code( sh4r.pc );
    xlat_recovery_record_t recover = xlat_get_recovery(code, pc, TRUE);
    if( recover != NULL ) {
        // Can be null if there is no recovery necessary
        sh4_translate_run_recovery(recover);
    if( thunk != NULL ) {
    // finally longjmp back into sh4_xlat_run_slice
    xlat_running = FALSE;
    longjmp(xlat_jmp_buf, XLAT_EXIT_CONTINUE);
/**
 * Exit translated code with the given XLAT_EXIT_* code: apply any pending
 * recovery record for the current native PC, then longjmp back into
 * sh4_xlat_run_slice with exit_code.
 */
void sh4_translate_exit( int exit_code )
    void *pc = xlat_get_native_pc();
    // could be null if we're not actually running inside the translator
    void *code = xlat_get_code( sh4r.pc );
    xlat_recovery_record_t recover = xlat_get_recovery(code, pc, TRUE);
    if( recover != NULL ) {
        // Can be null if there is no recovery necessary
        sh4_translate_run_recovery(recover);
    // finally longjmp back into sh4_xlat_run_slice
    xlat_running = FALSE;
    longjmp(xlat_jmp_buf, exit_code);
/**
 * Invoked when translated code hits a breakpoint: exits the translator
 * with XLAT_EXIT_BREAKPOINT.
 */
void sh4_translate_breakpoint_hit(uint32_t pc)
    /* Special case: a breakpoint on the very first instruction of a freshly
     * started slice. NOTE(review): presumably skipped (early return) so the
     * slice can begin executing — confirm against elided branch body. */
    if( sh4_starting && sh4r.slice_cycle == 0 && pc == sh4r.pc ) {
    sh4_translate_exit( XLAT_EXIT_BREAKPOINT );
 * Exit the current block at the end of the current instruction, flush the
 * translation cache (completely) and return control to sh4_xlat_run_slice.
 *
 * As a special case, if the current instruction is actually the last
 * instruction in the block (ie it's in a delay slot), this function
 * returns to allow normal completion of the translation block. Otherwise
 * this function never returns.
 *
 * Must only be invoked (indirectly) from within translated code.
void sh4_translate_flush_cache()
    /* Host-code PC we were executing inside the translated block. */
    void *pc = xlat_get_native_pc();
    assert( pc != NULL );
    void *code = xlat_get_code( sh4r.pc );
    xlat_recovery_record_t recover = xlat_get_recovery(code, pc, TRUE);
    if( recover != NULL ) {
        // Can be null if there is no recovery necessary
        sh4_translate_run_recovery(recover);
    xlat_running = FALSE;
    longjmp(xlat_jmp_buf, XLAT_EXIT_CONTINUE);
/**
 * Resolve the translated-code entry for the given virtual address,
 * refreshing the MMU icache mapping as required to obtain the physical
 * address.
 */
void *xlat_get_code_by_vma( sh4vma_t vma )
    if( IS_IN_ICACHE(vma) ) {
        /* Fast path: address already covered by the current icache mapping. */
        return xlat_get_code( GET_ICACHE_PHYS(vma) );
    /* Addresses above 0xFFFFFF00 are emulator hook addresses, not real
     * guest code (cf. syscall_invoke in sh4_xlat_run_slice). */
    if( vma > 0xFFFFFF00 ) {
    /* Refill the icache mapping for this address; on failure, redirect to
     * the fault handler's PC instead. */
    if( !mmu_update_icache(vma) ) {
        // fault - off to the fault handler
        if( !mmu_update_icache(sh4r.pc) ) {
            // double fault - halt
            ERROR( "Double fault - halting" );
    assert( IS_IN_ICACHE(sh4r.pc) );
    result = xlat_get_code( GET_ICACHE_PHYS(sh4r.pc) );
.