4 * SH4 translation core module. This part handles the non-target-specific
5 * section of the translation.
7 * Copyright (c) 2005 Nathan Keynes.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
23 #include "dreamcast.h"
24 #include "sh4/sh4core.h"
25 #include "sh4/sh4trans.h"
26 #include "sh4/xltcache.h"
30 * Execute a timeslice using translated code only (ie translate/execute loop)
32 uint32_t sh4_translate_run_slice( uint32_t nanosecs )
/* Translate/execute loop: repeatedly looks up (or translates) the block at
 * sh4r.pc and runs it until sh4r.slice_cycle reaches nanosecs.
 * NOTE(review): the embedded original line numbers jump (33, 38-39, 43-47,
 * 51-53, 57, 62, 65, ...), so several statements, the code dispatch call,
 * the return statement, and closing braces are not visible in this listing. */
34 void * (*code)() = NULL;
35 while( sh4r.slice_cycle < nanosecs ) {
/* Deliver any event that has become due at or before the current cycle
 * count before dispatching more translated code. */
36 if( sh4r.event_pending <= sh4r.slice_cycle ) {
37 if( sh4r.event_types & PENDING_EVENT ) {
40 /* Eventq execute may (quite likely) deliver an immediate IRQ */
41 if( sh4r.event_types & PENDING_IRQ ) {
42 sh4_accept_interrupt();
/* NOTE(review): PCs above 0xFFFFFF00 appear to be a host syscall hook
 * region rather than translatable SH4 code — confirm against
 * syscall_invoke()'s contract. */
48 if( sh4r.pc > 0xFFFFFF00 ) {
49 syscall_invoke( sh4r.pc );
50 sh4r.in_delay_slot = 0;
/* Fetch the translated block for the current virtual PC; retranslate if
 * absent or if it was built under a different SH4 mode. */
54 code = xlat_get_code_by_vma( sh4r.pc );
55 if( code == NULL || sh4r.xlat_sh4_mode != XLAT_BLOCK_MODE(code) ) {
56 code = sh4_translate_basic_block( sh4r.pc );
/* NOTE(review): this else-if condition duplicates the mode test already in
 * the preceding if at line 55, so this branch is unreachable as written —
 * the first branch retranslates on any mode mismatch. Likely the first
 * condition was intended to be just (code == NULL). */
58 } else if( sh4r.xlat_sh4_mode != XLAT_BLOCK_MODE(code) ) {
59 if( !IS_IN_ICACHE(sh4r.pc) ) {
60 /* If TLB is off, we may have gotten here without updating
61 * the icache, so do it now. This should never fail, so we assert below. */
63 mmu_update_icache(sh4r.pc);
64 assert( IS_IN_ICACHE(sh4r.pc) );
66 code = sh4_translate_basic_block( sh4r.pc );
/* Block currently under translation; set by sh4_translate_basic_block and
 * read by sh4_translate_add_recovery. Only meaningful mid-translation. */
74 xlat_cache_block_t xlat_current_block;
/* Recovery records accumulated while translating the current block; they are
 * memcpy'd onto the end of the emitted code at commit time (see line 148). */
75 struct xlat_recovery_record xlat_recovery[MAX_RECOVERY_SIZE];
/* Number of records currently filled in xlat_recovery[]; reset to 0 at the
 * start of each block translation (line 101). */
76 uint32_t xlat_recovery_posn;
78 void sh4_translate_add_recovery( uint32_t icount )
/* Record a recovery point for the block under translation: maps the current
 * native-code offset (xlat_output relative to the block start) to the SH4
 * instruction count 'icount' within the block.
 * NOTE(review): the increment of xlat_recovery_posn (and any bounds check
 * against MAX_RECOVERY_SIZE) falls in a line-number gap after these stores
 * and is not visible in this listing — presumably it follows here. */
80 xlat_recovery[xlat_recovery_posn].xlat_offset =
81 ((uintptr_t)xlat_output) - ((uintptr_t)xlat_current_block->code);
82 xlat_recovery[xlat_recovery_posn].sh4_icount = icount;
87 * Translate a linear basic block, ie all instructions from the start address
88 * (inclusive) until the next branch/jump instruction or the end of the page
90 * @param start VMA of the block start (which must already be in the icache)
91 * @return the address of the translated block
92 * or NULL if translation failed, eg due to lack of buffer space.
94 void * sh4_translate_basic_block( sh4addr_t start )
/* Translate one linear basic block starting at 'start' (which must already be
 * in the icache): emits instructions until a branch/jump, the end of the
 * 4KB page, or the end of the icache region, then appends the epilogue and
 * the recovery table, commits the block, and returns its native code address.
 * NOTE(review): line-number gaps hide the declarations of pc/i/done, the
 * main translation loop header, and several closing braces. */
97 sh4addr_t lastpc = (pc&0xFFFFF000)+0x1000;
99 xlat_current_block = xlat_start_block( GET_ICACHE_PHYS(start) );
100 xlat_output = (uint8_t *)xlat_current_block->code;
101 xlat_recovery_posn = 0;
/* eob = first byte past the currently allocated native buffer. */
102 uint8_t *eob = xlat_output + xlat_current_block->size;
/* Never translate past the end of the icache-resident region. */
104 if( GET_ICACHE_END() < lastpc ) {
105 lastpc = GET_ICACHE_END();
108 sh4_translate_begin_block(pc);
111 /* check for breakpoints at this pc */
112 for( i=0; i<sh4_breakpoint_count; i++ ) {
113 if( sh4_breakpoints[i].address == pc ) {
114 sh4_translate_emit_breakpoint(pc);
/* Grow the native buffer if the next instruction might not fit; all
 * pointers into the block are rebased onto the (possibly moved) buffer. */
118 if( eob - xlat_output < MAX_INSTRUCTION_SIZE ) {
119 uint8_t *oldstart = xlat_current_block->code;
120 xlat_current_block = xlat_extend_block( xlat_output - oldstart + MAX_INSTRUCTION_SIZE );
121 xlat_output = xlat_current_block->code + (xlat_output - oldstart);
122 eob = xlat_current_block->code + xlat_current_block->size;
/* 'done' signals end-of-block (branch/jump encountered). */
124 done = sh4_translate_instruction( pc );
125 assert( xlat_output <= eob );
/* Stop at the page / icache boundary even without a branch. */
127 if ( pc >= lastpc ) {
133 // Add end-of-block recovery for post-instruction checks
/* icount is in 16-bit SH4 instruction units, hence the >>1. */
134 sh4_translate_add_recovery( (pc - start)>>1 );
/* Ensure the buffer can hold code + epilogue + recovery table before
 * emitting the epilogue. */
136 int epilogue_size = sh4_translate_end_block_size();
137 uint32_t recovery_size = sizeof(struct xlat_recovery_record)*xlat_recovery_posn;
138 uint32_t finalsize = (xlat_output - xlat_current_block->code) + epilogue_size + recovery_size;
139 if( xlat_current_block->size < finalsize ) {
140 uint8_t *oldstart = xlat_current_block->code;
141 xlat_current_block = xlat_extend_block( finalsize );
142 xlat_output = xlat_current_block->code + (xlat_output - oldstart);
144 sh4_translate_end_block(pc);
145 assert( xlat_output <= (xlat_current_block->code + xlat_current_block->size - recovery_size) );
147 /* Write the recovery records onto the end of the code block */
148 memcpy( xlat_output, xlat_recovery, recovery_size);
/* The recovery table starts where code emission stopped. */
149 xlat_current_block->recover_table_offset = xlat_output - (uint8_t *)xlat_current_block->code;
150 xlat_current_block->recover_table_size = xlat_recovery_posn;
/* Stamp the block with the SH4 mode it was translated under, so the run
 * loop can detect stale-mode blocks (see line 55). */
151 xlat_current_block->xlat_sh4_mode = sh4r.xlat_sh4_mode;
152 xlat_commit_block( finalsize, pc-start );
153 return xlat_current_block->code;
157 * "Execute" the supplied recovery record. Currently this only updates
158 * sh4r.pc and sh4r.slice_cycle according to the currently executing
159 * instruction. In future this may be more sophisticated (ie will
160 * call into generated code).
162 void sh4_translate_run_recovery( xlat_recovery_record_t recovery )
/* Apply a recovery record: charge sh4_icount instructions' worth of cycles to
 * the slice, and advance pc by sh4_icount 16-bit instructions (<<1 bytes). */
164 sh4r.slice_cycle += (recovery->sh4_icount * sh4_cpu_period);
165 sh4r.pc += (recovery->sh4_icount<<1);
169 * Same as sh4_translate_run_recovery, but is used to recover from a taken
170 * exception - that is, it fixes sh4r.spc rather than sh4r.pc
172 void sh4_translate_run_exception_recovery( xlat_recovery_record_t recovery )
/* Exception-path variant of sh4_translate_run_recovery: identical cycle
 * accounting, but advances the saved pc (sh4r.spc) instead of sh4r.pc. */
174 sh4r.slice_cycle += (recovery->sh4_icount * sh4_cpu_period);
175 sh4r.spc += (recovery->sh4_icount<<1);
178 void sh4_translate_exit_recover( )
/* Fix up sh4r state when exiting from inside translated code: finds the
 * recovery record preceding the current native pc within the block for
 * sh4r.pc and applies it.
 * NOTE(review): line-number gaps hide the handling when 'code' is NULL and
 * the function's closing braces. */
180 void *code = xlat_get_code_by_vma( sh4r.pc );
182 uint32_t size = xlat_get_code_size( code );
183 void *pc = xlat_get_native_pc( code, size );
185 // could be null if we're not actually running inside the translator
186 xlat_recovery_record_t recover = xlat_get_pre_recovery(code, pc);
187 if( recover != NULL ) {
188 // Can be null if there is no recovery necessary
189 sh4_translate_run_recovery(recover);
195 void sh4_translate_exception_exit_recover( )
/* Same as sh4_translate_exit_recover, but for exits taken via an exception:
 * looks up the block by sh4r.spc and applies the exception-path recovery
 * (which fixes sh4r.spc rather than sh4r.pc).
 * NOTE(review): line-number gaps hide the NULL-'code' handling and the
 * function's closing braces. */
197 void *code = xlat_get_code_by_vma( sh4r.spc );
199 uint32_t size = xlat_get_code_size( code );
200 void *pc = xlat_get_native_pc( code, size );
202 // could be null if we're not actually running inside the translator
203 xlat_recovery_record_t recover = xlat_get_pre_recovery(code, pc);
204 if( recover != NULL ) {
205 // Can be null if there is no recovery necessary
206 sh4_translate_run_exception_recovery(recover);
213 void FASTCALL sh4_translate_breakpoint_hit(uint32_t pc)
/* Called from generated code when a translated breakpoint is reached.
 * The guard appears to suppress the breakpoint when execution is just
 * (re)starting at exactly this pc; the body of the if (presumably an early
 * return) falls in a line-number gap and is not visible here. */
215 if( sh4_starting && sh4r.slice_cycle == 0 && pc == sh4r.pc ) {
/* Otherwise, drop out of the core with a breakpoint exit code. */
218 sh4_core_exit( CORE_EXIT_BREAKPOINT );
221 void * FASTCALL xlat_get_code_by_vma( sh4vma_t vma )
225 if( IS_IN_ICACHE(vma) ) {
226 return xlat_get_code( GET_ICACHE_PHYS(vma) );
229 if( vma > 0xFFFFFF00 ) {
234 if( !mmu_update_icache(vma) ) {
235 // fault - off to the fault handler
236 if( !mmu_update_icache(sh4r.pc) ) {
237 // double fault - halt
238 ERROR( "Double fault - halting" );
239 sh4_core_exit(CORE_EXIT_HALT);
244 assert( IS_IN_ICACHE(sh4r.pc) );
245 result = xlat_get_code( GET_ICACHE_PHYS(sh4r.pc) );
.