4 * SH4 translation core module. This part handles the non-target-specific
5 * section of the translation.
7 * Copyright (c) 2005 Nathan Keynes.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
24 #include "sh4/sh4core.h"
25 #include "sh4/sh4trans.h"
26 #include "sh4/xltcache.h"
/* Jump target for the longjmp() calls in sh4_translate_exit /
 * sh4_translate_unwind_stack / sh4_translate_flush_cache; set by the
 * setjmp() in sh4_xlat_run_slice. */
29 static jmp_buf xlat_jmp_buf;
/* TRUE while sh4_xlat_run_slice is executing translated code; cleared
 * immediately before every longjmp back out of the translator. */
30 static gboolean xlat_running = FALSE;
/* Report whether the translated-code core is currently executing.
 * NOTE(review): body not visible in this listing - presumably returns
 * xlat_running; confirm against the full source. */
32 gboolean sh4_xlat_is_running()
38 * Execute a timeslice using translated code only (ie translate/execute loop)
/* Runs up to `nanosecs` worth of SH4 time through the translation cache and
 * returns the cycle count actually consumed (sh4r.slice_cycle).
 * NOTE(review): this listing elides many lines (case bodies, loop interior,
 * closing braces); the comments below describe only the visible statements. */
40 uint32_t sh4_xlat_run_slice( uint32_t nanosecs )
/* CPU sleeping/standby: only wake if a pending event falls inside this slice;
 * the slice is fast-forwarded to the event time. */
44 if( sh4r.sh4_state != SH4_STATE_RUNNING ) {
45 if( sh4r.event_pending < nanosecs ) {
46 sh4r.sh4_state = SH4_STATE_RUNNING;
47 sh4r.slice_cycle = sh4r.event_pending;
/* Re-entry point: translated code longjmp()s back here carrying an
 * XLAT_EXIT_* code (see sh4_translate_exit et al). */
51 switch( setjmp(xlat_jmp_buf) ) {
52 case XLAT_EXIT_BREAKPOINT:
/* One-shot breakpoints are consumed as soon as they fire. */
53 sh4_clear_breakpoint( sh4r.pc, BREAK_ONESHOT );
/* Peripheral clocks (timers, serial) only advance while not in standby. */
56 if( sh4r.sh4_state != SH4_STATE_STANDBY ) {
57 TMU_run_slice( sh4r.slice_cycle );
58 SCIF_run_slice( sh4r.slice_cycle );
60 return sh4r.slice_cycle;
62 case XLAT_EXIT_SYSRESET:
/* Main translate/execute loop. `code` holds the host-native entry point for
 * the block at sh4r.pc (NULL until looked up or freshly translated). */
68 void * (*code)() = NULL;
69 while( sh4r.slice_cycle < nanosecs ) {
/* Deliver any events/interrupts that have become due at this cycle count. */
70 if( sh4r.event_pending <= sh4r.slice_cycle ) {
71 if( sh4r.event_types & PENDING_EVENT ) {
74 /* Eventq execute may (quite likely) deliver an immediate IRQ */
75 if( sh4r.event_types & PENDING_IRQ ) {
76 sh4_accept_interrupt();
/* PCs above 0xFFFFFF00 are emulator syscall hooks, dispatched directly
 * rather than translated. */
82 if( sh4r.pc > 0xFFFFFF00 ) {
83 syscall_invoke( sh4r.pc );
84 sh4r.in_delay_slot = 0;
/* Look up a cached translation for the current PC; on a miss (elided branch)
 * translate a fresh basic block. */
88 code = xlat_get_code_by_vma( sh4r.pc );
90 code = sh4_translate_basic_block( sh4r.pc );
/* Normal end of slice: run peripherals for the full slice unless in standby. */
99 if( sh4r.sh4_state != SH4_STATE_STANDBY ) {
100 TMU_run_slice( nanosecs );
101 SCIF_run_slice( nanosecs );
/* Current write position in the translation output buffer; advanced by the
 * target code generator as it emits host instructions. */
106 uint8_t *xlat_output;
/* Recovery records accumulated while translating the current block; copied
 * onto the end of the code block when it is committed
 * (see sh4_translate_basic_block). */
107 struct xlat_recovery_record xlat_recovery[MAX_RECOVERY_SIZE];
/* Number of valid entries in xlat_recovery for the block being translated. */
108 uint32_t xlat_recovery_posn;
111 * Translate a linear basic block, ie all instructions from the start address
112 * (inclusive) until the next branch/jump instruction or the end of the page
114 * @return the address of the translated block
115 * eg due to lack of buffer space.
/* NOTE(review): this listing elides lines (opening brace, loop header,
 * break statements); comments below cover only the visible statements. */
117 void * sh4_translate_basic_block( sh4addr_t start )
119 sh4addr_t pc = start;
/* Translation never crosses a 4KB page boundary: lastpc is the hard stop. */
120 sh4addr_t lastpc = (pc&0xFFFFF000)+0x1000;
122 xlat_cache_block_t block = xlat_start_block( start );
123 xlat_output = (uint8_t *)block->code;
124 xlat_recovery_posn = 0;
/* eob marks the end of usable space in the current cache block. */
125 uint8_t *eob = xlat_output + block->size;
/* Clamp further to the end of the currently-mapped icache region. */
127 if( GET_ICACHE_END() < lastpc ) {
128 lastpc = GET_ICACHE_END();
131 sh4_translate_begin_block(pc);
134 /* check for breakpoints at this pc */
135 for( i=0; i<sh4_breakpoint_count; i++ ) {
136 if( sh4_breakpoints[i].address == pc ) {
137 sh4_translate_emit_breakpoint(pc);
/* Grow the cache block if the next instruction might not fit.
 * xlat_extend_block may relocate the block, so xlat_output/eob are rebased
 * relative to the new block->code. */
141 if( eob - xlat_output < MAX_INSTRUCTION_SIZE ) {
142 uint8_t *oldstart = block->code;
143 block = xlat_extend_block( xlat_output - oldstart + MAX_INSTRUCTION_SIZE );
144 xlat_output = block->code + (xlat_output - oldstart);
145 eob = block->code + block->size;
147 done = sh4_translate_instruction( pc );
148 assert( xlat_output <= eob );
/* Stop at the page/icache boundary even if no branch was encountered. */
150 if ( pc >= lastpc ) {
/* Ensure room for the epilogue; again rebase after a possible relocation. */
155 if( eob - xlat_output < EPILOGUE_SIZE ) {
156 uint8_t *oldstart = block->code;
157 block = xlat_extend_block( xlat_output - oldstart + EPILOGUE_SIZE );
158 xlat_output = block->code + (xlat_output - oldstart);
160 sh4_translate_end_block(pc);
162 /* Write the recovery records onto the end of the code block */
163 uint32_t recovery_size = sizeof(struct xlat_recovery_record)*xlat_recovery_posn;
164 uint32_t finalsize = xlat_output - block->code + recovery_size;
165 if( finalsize > block->size ) {
166 uint8_t *oldstart = block->code;
167 block = xlat_extend_block( finalsize );
168 xlat_output = block->code + (xlat_output - oldstart);
170 memcpy( xlat_output, xlat_recovery, recovery_size);
171 block->recover_table_offset = xlat_output - (uint8_t *)block->code;
172 block->recover_table_size = xlat_recovery_posn;
/* Commit with the SH4 source length (pc-start) so the cache can map
 * guest addresses back onto this block. */
173 xlat_commit_block( finalsize, pc-start );
178 * "Execute" the supplied recovery record. Currently this only updates
179 * sh4r.pc and sh4r.slice_cycle according to the currently executing
180 * instruction. In future this may be more sophisticated (ie will
181 * call into generated code).
183 void sh4_translate_run_recovery( xlat_recovery_record_t recovery )
/* Credit the cycle counter with the instructions already completed in the
 * partially-executed block (sh4_icount instructions at sh4_cpu_period each)... */
185 sh4r.slice_cycle += (recovery->sh4_icount * sh4_cpu_period);
/* ...and advance the PC by the same count; the <<1 reflects the fixed
 * 2-byte SH4 instruction encoding. */
186 sh4r.pc += (recovery->sh4_icount<<1);
/* Unwind out of currently-executing translated code: resolve the native PC,
 * apply any recovery record for the in-flight instruction, run the optional
 * thunk, then longjmp back into sh4_xlat_run_slice with XLAT_EXIT_CONTINUE.
 * NOTE(review): lines are elided in this listing - the handling of
 * `abort_after` is not visible here; confirm against the full source. */
189 void sh4_translate_unwind_stack( gboolean abort_after, unwind_thunk_t thunk )
191 void *pc = xlat_get_native_pc();
/* Must be called from within translated code, so a native PC must exist. */
193 assert( pc != NULL );
194 void *code = xlat_get_code( sh4r.pc );
195 xlat_recovery_record_t recover = xlat_get_recovery(code, pc, TRUE);
196 if( recover != NULL ) {
197 // Can be null if there is no recovery necessary
198 sh4_translate_run_recovery(recover);
200 if( thunk != NULL ) {
203 // finally longjmp back into sh4_xlat_run_slice
204 xlat_running = FALSE;
205 longjmp(xlat_jmp_buf, XLAT_EXIT_CONTINUE);
/* Exit translated code (if any is running) and return control to
 * sh4_xlat_run_slice with the given XLAT_EXIT_* code. Unlike
 * sh4_translate_unwind_stack there is no assert on pc: a NULL native PC
 * simply means we were not inside the translator. */
208 void sh4_translate_exit( int exit_code )
210 void *pc = xlat_get_native_pc();
212 // could be null if we're not actually running inside the translator
213 void *code = xlat_get_code( sh4r.pc );
214 xlat_recovery_record_t recover = xlat_get_recovery(code, pc, TRUE);
215 if( recover != NULL ) {
216 // Can be null if there is no recovery necessary
217 sh4_translate_run_recovery(recover);
220 // finally longjmp back into sh4_xlat_run_slice
221 xlat_running = FALSE;
222 longjmp(xlat_jmp_buf, exit_code);
/* Invoked from translated code when an emitted breakpoint instruction fires;
 * exits the translator with XLAT_EXIT_BREAKPOINT.
 * NOTE(review): lines elided - the visible guard presumably ignores the
 * breakpoint coinciding with the initial start-of-run pc so execution can
 * resume past it; confirm against the full source. */
225 void sh4_translate_breakpoint_hit(uint32_t pc)
227 if( sh4_starting && sh4r.slice_cycle == 0 && pc == sh4r.pc ) {
230 sh4_translate_exit( XLAT_EXIT_BREAKPOINT );
234 * Exit the current block at the end of the current instruction, flush the
235 * translation cache (completely) and return control to sh4_xlat_run_slice.
237 * As a special case, if the current instruction is actually the last
238 * instruction in the block (ie it's in a delay slot), this function
239 * returns to allow normal completion of the translation block. Otherwise
240 * this function never returns.
242 * Must only be invoked (indirectly) from within translated code.
244 void sh4_translate_flush_cache()
246 void *pc = xlat_get_native_pc();
/* Only legal from inside translated code - a native PC must exist. */
247 assert( pc != NULL );
249 void *code = xlat_get_code( sh4r.pc );
250 xlat_recovery_record_t recover = xlat_get_recovery(code, pc, TRUE);
251 if( recover != NULL ) {
252 // Can be null if there is no recovery necessary
253 sh4_translate_run_recovery(recover);
/* NOTE(review): the actual cache flush and the delay-slot early return
 * described above occur on lines elided from this listing. */
255 xlat_running = FALSE;
256 longjmp(xlat_jmp_buf, XLAT_EXIT_CONTINUE);
/* Look up the native code block for the given SH4 virtual address, mapping
 * the vma to a physical address through the current icache region.
 * NOTE(review): lines are elided and the function continues past the end of
 * this listing; the miss path visibly refills the icache mapping via
 * mmu_update_icache and retries with sh4r.pc on fault. */
263 void *xlat_get_code_by_vma( sh4vma_t vma )
/* Fast path: the vma falls inside the currently-mapped icache region. */
267 if( IS_IN_ICACHE(vma) ) {
268 result = xlat_get_code( GET_ICACHE_PHYS(vma) );
/* Addresses above 0xFFFFFF00 are emulator syscall hooks, never translated
 * (see the matching check in sh4_xlat_run_slice). */
271 if( vma > 0xFFFFFF00 ) {
276 if( !mmu_update_icache(vma) ) {
277 // fault - off to the fault handler
278 if( !mmu_update_icache(sh4r.pc) ) {
279 // double fault - halt
280 ERROR( "Double fault - halting" );
286 assert( IS_IN_ICACHE(sh4r.pc) );
287 result = xlat_get_code( GET_ICACHE_PHYS(sh4r.pc) );
.