lxdream.org :: lxdream/src/sh4/sh4trans.c
filename     src/sh4/sh4trans.c
changeset    949:d7833018931f
prev         941:c67574ed4355
next         1067:d3c00ffccfcd
author       nkeynes
date         Wed Jan 07 04:39:58 2009 +0000 (15 years ago)
branch       lxdream-mem
permissions  -rw-r--r--
last change  Add missed file from previous commit - remove sh4_translate_flush_cache, change exit to use pre-recovery
/**
 * $Id$
 *
 * SH4 translation core module. This part handles the non-target-specific
 * section of the translation.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <assert.h>
#include "eventq.h"
#include "syscall.h"
#include "clock.h"
#include "dreamcast.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "sh4/xltcache.h"
/**
 * Execute a timeslice using translated code only (ie translate/execute loop)
 */
uint32_t sh4_translate_run_slice( uint32_t nanosecs )
{
    void * (*code)() = NULL;
    while( sh4r.slice_cycle < nanosecs ) {
        if( sh4r.event_pending <= sh4r.slice_cycle ) {
            if( sh4r.event_types & PENDING_EVENT ) {
                event_execute();
            }
            /* Eventq execute may (quite likely) deliver an immediate IRQ */
            if( sh4r.event_types & PENDING_IRQ ) {
                sh4_accept_interrupt();
                code = NULL;
            }
        }

        if( code == NULL ) {
            if( sh4r.pc > 0xFFFFFF00 ) {
                syscall_invoke( sh4r.pc );
                sh4r.in_delay_slot = 0;
                sh4r.pc = sh4r.pr;
            }

            code = xlat_get_code_by_vma( sh4r.pc );
            if( code == NULL || sh4r.xlat_sh4_mode != XLAT_BLOCK_MODE(code) ) {
                code = sh4_translate_basic_block( sh4r.pc );
            }
        } else if( sh4r.xlat_sh4_mode != XLAT_BLOCK_MODE(code) ) {
            code = sh4_translate_basic_block( sh4r.pc );
        }
        code = code();
    }
    return nanosecs;
}
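
/* A minimal, self-contained sketch of the dispatch pattern used by
 * sh4_translate_run_slice() above: look up (or translate) a block for the
 * current PC, execute it, and chain to whatever block it returns, until the
 * timeslice budget is spent. All toy_* names below are illustrative stand-ins,
 * not lxdream APIs.
 */
typedef void *(*toy_block_fn)(void);

static uint32_t toy_pc = 0;
static uint32_t toy_slice_cycle = 0;

static void *toy_block(void)
{
    toy_slice_cycle += 200;  /* pretend this block consumed 200ns of SH4 time */
    toy_pc += 2;
    return NULL;             /* NULL forces a fresh lookup on the next iteration */
}

static toy_block_fn toy_lookup(uint32_t pc)    { (void)pc; return NULL; }      /* always a cache miss */
static toy_block_fn toy_translate(uint32_t pc) { (void)pc; return toy_block; } /* always "translates" */

static uint32_t toy_run_slice( uint32_t nanosecs )
{
    toy_block_fn code = NULL;
    while( toy_slice_cycle < nanosecs ) {
        if( code == NULL ) {
            code = toy_lookup( toy_pc );
            if( code == NULL ) {
                code = toy_translate( toy_pc );
            }
        }
        code = (toy_block_fn)code();  /* a block may hand back its successor directly */
    }
    return nanosecs;
}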
uint8_t *xlat_output;
xlat_cache_block_t xlat_current_block;
struct xlat_recovery_record xlat_recovery[MAX_RECOVERY_SIZE];
uint32_t xlat_recovery_posn;

void sh4_translate_add_recovery( uint32_t icount )
{
    xlat_recovery[xlat_recovery_posn].xlat_offset =
        ((uintptr_t)xlat_output) - ((uintptr_t)xlat_current_block->code);
    xlat_recovery[xlat_recovery_posn].sh4_icount = icount;
    xlat_recovery_posn++;
}
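
/* Each recovery record pairs "how far into the native output we are" with
 * "how many SH4 instructions have been accounted for at that point"; this is
 * what lets the exit paths below map a native PC back onto SH4 time. A sketch
 * of the record shape implied by the fields used above and of the kind of
 * lookup it supports (the authoritative definitions live in sh4/xltcache.h;
 * the toy_* names are illustrative only):
 */
struct toy_recovery_record {
    uint32_t xlat_offset;   /* offset of the native code emitted after this point */
    uint32_t sh4_icount;    /* SH4 instructions completed before that native code */
};

/* Return the icount of the last record at or before the given native offset. */
static uint32_t toy_icount_at( const struct toy_recovery_record *table,
                               uint32_t count, uint32_t native_offset )
{
    uint32_t icount = 0;
    uint32_t i;
    for( i = 0; i < count && table[i].xlat_offset <= native_offset; i++ ) {
        icount = table[i].sh4_icount;
    }
    return icount;
}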
/**
 * Translate a linear basic block, ie all instructions from the start address
 * (inclusive) until the next branch/jump instruction or the end of the page
 * is reached.
 * @param start VMA of the block start (which must already be in the icache)
 * @return the address of the translated block, or NULL if the block could not
 * be translated (eg due to lack of buffer space).
 */
void * sh4_translate_basic_block( sh4addr_t start )
{
    sh4addr_t pc = start;
    sh4addr_t lastpc = (pc&0xFFFFF000)+0x1000;
    int done, i;
    xlat_current_block = xlat_start_block( GET_ICACHE_PHYS(start) );
    xlat_output = (uint8_t *)xlat_current_block->code;
    xlat_recovery_posn = 0;
    uint8_t *eob = xlat_output + xlat_current_block->size;

    if( GET_ICACHE_END() < lastpc ) {
        lastpc = GET_ICACHE_END();
    }

    sh4_translate_begin_block(pc);

    do {
        /* check for breakpoints at this pc */
        for( i=0; i<sh4_breakpoint_count; i++ ) {
            if( sh4_breakpoints[i].address == pc ) {
                sh4_translate_emit_breakpoint(pc);
                break;
            }
        }
        if( eob - xlat_output < MAX_INSTRUCTION_SIZE ) {
            uint8_t *oldstart = xlat_current_block->code;
            xlat_current_block = xlat_extend_block( xlat_output - oldstart + MAX_INSTRUCTION_SIZE );
            xlat_output = xlat_current_block->code + (xlat_output - oldstart);
            eob = xlat_current_block->code + xlat_current_block->size;
        }
        done = sh4_translate_instruction( pc );
        assert( xlat_output <= eob );
        pc += 2;
        if ( pc >= lastpc ) {
            done = 2;
        }
    } while( !done );
    pc += (done - 2);

    // Add end-of-block recovery for post-instruction checks
    sh4_translate_add_recovery( (pc - start)>>1 );

    int epilogue_size = sh4_translate_end_block_size();
    uint32_t recovery_size = sizeof(struct xlat_recovery_record)*xlat_recovery_posn;
    uint32_t finalsize = (xlat_output - xlat_current_block->code) + epilogue_size + recovery_size;
    if( xlat_current_block->size < finalsize ) {
        uint8_t *oldstart = xlat_current_block->code;
        xlat_current_block = xlat_extend_block( finalsize );
        xlat_output = xlat_current_block->code + (xlat_output - oldstart);
    }
    sh4_translate_end_block(pc);
    assert( xlat_output <= (xlat_current_block->code + xlat_current_block->size - recovery_size) );

    /* Write the recovery records onto the end of the code block */
    memcpy( xlat_output, xlat_recovery, recovery_size);
    xlat_current_block->recover_table_offset = xlat_output - (uint8_t *)xlat_current_block->code;
    xlat_current_block->recover_table_size = xlat_recovery_posn;
    xlat_current_block->xlat_sh4_mode = sh4r.xlat_sh4_mode;
    xlat_commit_block( finalsize, pc-start );
    return xlat_current_block->code;
}
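
/* After xlat_commit_block() the block's allocation holds, in order, the
 * translated code, the epilogue emitted by sh4_translate_end_block(), and the
 * recovery records memcpy'd in above, with recover_table_offset/size locating
 * the records. A sketch of how that tail table can be located from a block
 * header (toy_block_header is an illustrative stand-in for the real
 * xlat_cache_block_t, using only the fields referenced above):
 */
struct toy_block_header {
    uint32_t recover_table_offset;  /* byte offset of the recovery records within code[] */
    uint32_t recover_table_size;    /* number of records, not bytes */
    unsigned char code[1];          /* emitted native code follows the header */
};

static void *toy_recovery_table( struct toy_block_header *block )
{
    /* the records sit at the very end of the emitted code, exactly where
     * sh4_translate_basic_block() copied them */
    return block->code + block->recover_table_offset;
}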
/**
 * "Execute" the supplied recovery record. Currently this only updates
 * sh4r.pc and sh4r.slice_cycle according to the currently executing
 * instruction. In future this may be more sophisticated (ie will
 * call into generated code).
 */
void sh4_translate_run_recovery( xlat_recovery_record_t recovery )
{
    sh4r.slice_cycle += (recovery->sh4_icount * sh4_cpu_period);
    sh4r.pc += (recovery->sh4_icount<<1);
}
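
/* Worked example: with recovery->sh4_icount == 3 and sh4_cpu_period == 5
 * (eg nanoseconds per cycle for the Dreamcast's 200MHz SH4), the record adds
 * 15 to sh4r.slice_cycle and 6 to sh4r.pc - SH4 instructions are a fixed
 * 2 bytes each, hence the <<1.
 */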
/**
 * Same as sh4_translate_run_recovery, but is used to recover from a taken
 * exception - that is, it fixes sh4r.spc rather than sh4r.pc
 */
void sh4_translate_run_exception_recovery( xlat_recovery_record_t recovery )
{
    sh4r.slice_cycle += (recovery->sh4_icount * sh4_cpu_period);
    sh4r.spc += (recovery->sh4_icount<<1);
}
void sh4_translate_exit_recover( )
{
    void *code = xlat_get_code_by_vma( sh4r.pc );
    if( code != NULL ) {
        uint32_t size = xlat_get_code_size( code );
        void *pc = xlat_get_native_pc( code, size );
        if( pc != NULL ) {
            // could be null if we're not actually running inside the translator
            xlat_recovery_record_t recover = xlat_get_pre_recovery(code, pc);
            if( recover != NULL ) {
                // Can be null if there is no recovery necessary
                sh4_translate_run_recovery(recover);
            }
        }
    }
}
void sh4_translate_exception_exit_recover( )
{
    void *code = xlat_get_code_by_vma( sh4r.spc );
    if( code != NULL ) {
        uint32_t size = xlat_get_code_size( code );
        void *pc = xlat_get_native_pc( code, size );
        if( pc != NULL ) {
            // could be null if we're not actually running inside the translator
            xlat_recovery_record_t recover = xlat_get_pre_recovery(code, pc);
            if( recover != NULL ) {
                // Can be null if there is no recovery necessary
                sh4_translate_run_exception_recovery(recover);
            }
        }
    }
}
void FASTCALL sh4_translate_breakpoint_hit(uint32_t pc)
{
    if( sh4_starting && sh4r.slice_cycle == 0 && pc == sh4r.pc ) {
        return;
    }
    sh4_core_exit( CORE_EXIT_BREAKPOINT );
}
void * FASTCALL xlat_get_code_by_vma( sh4vma_t vma )
{
    void *result = NULL;

    if( IS_IN_ICACHE(vma) ) {
        return xlat_get_code( GET_ICACHE_PHYS(vma) );
    }

    if( vma > 0xFFFFFF00 ) {
        // lxdream hook
        return NULL;
    }

    if( !mmu_update_icache(vma) ) {
        // fault - off to the fault handler
        if( !mmu_update_icache(sh4r.pc) ) {
            // double fault - halt
            ERROR( "Double fault - halting" );
            sh4_core_exit(CORE_EXIT_HALT);
            return NULL;
        }
    }

    // Look up sh4r.pc rather than vma: if the icache update above faulted,
    // sh4r.pc has already been redirected to the exception handler.
    assert( IS_IN_ICACHE(sh4r.pc) );
    result = xlat_get_code( GET_ICACHE_PHYS(sh4r.pc) );
    return result;
}