Search
lxdream.org :: lxdream/src/sh4/sh4trans.c
lxdream 0.9.1
released Jun 29
Download Now
filename src/sh4/sh4trans.c
changeset 936:f394309c399a
prev 914:72abecf5a315
next 941:c67574ed4355
author nkeynes
date Sat Dec 27 03:14:59 2008 +0000 (13 years ago)
branch lxdream-mem
permissions -rw-r--r--
last change Update sh4x86 to take advantage of SR assumptions. nice 2% there :)
view annotate diff log raw
     1 /**
     2  * $Id$
     3  * 
     4  * SH4 translation core module. This part handles the non-target-specific
     5  * section of the translation.
     6  *
     7  * Copyright (c) 2005 Nathan Keynes.
     8  *
     9  * This program is free software; you can redistribute it and/or modify
    10  * it under the terms of the GNU General Public License as published by
    11  * the Free Software Foundation; either version 2 of the License, or
    12  * (at your option) any later version.
    13  *
    14  * This program is distributed in the hope that it will be useful,
    15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    17  * GNU General Public License for more details.
    18  */
    19 #include <assert.h>
    20 #include "eventq.h"
    21 #include "syscall.h"
    22 #include "clock.h"
    23 #include "dreamcast.h"
    24 #include "sh4/sh4core.h"
    25 #include "sh4/sh4trans.h"
    26 #include "sh4/xltcache.h"
    29 /**
    30  * Execute a timeslice using translated code only (ie translate/execute loop)
    31  */
    32 uint32_t sh4_translate_run_slice( uint32_t nanosecs ) 
    33 {
    34     void * (*code)() = NULL;
    35     while( sh4r.slice_cycle < nanosecs ) {
    36         if( sh4r.event_pending <= sh4r.slice_cycle ) {
    37             if( sh4r.event_types & PENDING_EVENT ) {
    38                 event_execute();
    39             }
    40             /* Eventq execute may (quite likely) deliver an immediate IRQ */
    41             if( sh4r.event_types & PENDING_IRQ ) {
    42                 sh4_accept_interrupt();
    43                 code = NULL;
    44             }
    45         }
    47         if( code == NULL ) {
    48             if( sh4r.pc > 0xFFFFFF00 ) {
    49                 syscall_invoke( sh4r.pc );
    50                 sh4r.in_delay_slot = 0;
    51                 sh4r.pc = sh4r.pr;
    52             }
    54             code = xlat_get_code_by_vma( sh4r.pc );
    55             if( code == NULL || sh4r.xlat_sh4_mode != XLAT_BLOCK_MODE(code) ) {
    56                 code = sh4_translate_basic_block( sh4r.pc );
    57             }
    58         } else if( sh4r.xlat_sh4_mode != XLAT_BLOCK_MODE(code) ) {
    59             code = sh4_translate_basic_block( sh4r.pc );
    60         }
    61         code = code();
    62     }
    63     return nanosecs;
    64 }
    66 uint8_t *xlat_output;
    67 xlat_cache_block_t xlat_current_block;
    68 struct xlat_recovery_record xlat_recovery[MAX_RECOVERY_SIZE];
    69 uint32_t xlat_recovery_posn;
    71 void sh4_translate_add_recovery( uint32_t icount )
    72 {
    73     xlat_recovery[xlat_recovery_posn].xlat_offset = 
    74         ((uintptr_t)xlat_output) - ((uintptr_t)xlat_current_block->code);
    75     xlat_recovery[xlat_recovery_posn].sh4_icount = icount;
    76     xlat_recovery_posn++;
    77 }
     79 /**
     80  * Translate a linear basic block, ie all instructions from the start address
     81  * (inclusive) until the next branch/jump instruction or the end of the page
     82  * is reached.
     83  * @param start VMA of the block start (which must already be in the icache)
     84  * @return the address of the translated block, or NULL if the block could
     85  * not be translated, eg due to lack of buffer space.
     86  */
    87 void * sh4_translate_basic_block( sh4addr_t start )
    88 {
    89     sh4addr_t pc = start;
    90     sh4addr_t lastpc = (pc&0xFFFFF000)+0x1000;
    91     int done, i;
    92     xlat_current_block = xlat_start_block( GET_ICACHE_PHYS(start) );
    93     xlat_output = (uint8_t *)xlat_current_block->code;
    94     xlat_recovery_posn = 0;
    95     uint8_t *eob = xlat_output + xlat_current_block->size;
    97     if( GET_ICACHE_END() < lastpc ) {
    98         lastpc = GET_ICACHE_END();
    99     }
   101     sh4_translate_begin_block(pc);
   103     do {
   104         /* check for breakpoints at this pc */
   105         for( i=0; i<sh4_breakpoint_count; i++ ) {
   106             if( sh4_breakpoints[i].address == pc ) {
   107                 sh4_translate_emit_breakpoint(pc);
   108                 break;
   109             }
   110         }
   111         if( eob - xlat_output < MAX_INSTRUCTION_SIZE ) {
   112             uint8_t *oldstart = xlat_current_block->code;
   113             xlat_current_block = xlat_extend_block( xlat_output - oldstart + MAX_INSTRUCTION_SIZE );
   114             xlat_output = xlat_current_block->code + (xlat_output - oldstart);
   115             eob = xlat_current_block->code + xlat_current_block->size;
   116         }
   117         done = sh4_translate_instruction( pc ); 
   118         assert( xlat_output <= eob );
   119         pc += 2;
   120         if ( pc >= lastpc ) {
   121             done = 2;
   122         }
   123     } while( !done );
   124     pc += (done - 2);
   126     // Add end-of-block recovery for post-instruction checks
   127     sh4_translate_add_recovery( (pc - start)>>1 ); 
   129     int epilogue_size = sh4_translate_end_block_size();
   130     uint32_t recovery_size = sizeof(struct xlat_recovery_record)*xlat_recovery_posn;
   131     uint32_t finalsize = (xlat_output - xlat_current_block->code) + epilogue_size + recovery_size;
   132     if( xlat_current_block->size < finalsize ) {
   133         uint8_t *oldstart = xlat_current_block->code;
   134         xlat_current_block = xlat_extend_block( finalsize );
   135         xlat_output = xlat_current_block->code + (xlat_output - oldstart);
   136     }	
   137     sh4_translate_end_block(pc);
   138     assert( xlat_output <= (xlat_current_block->code + xlat_current_block->size - recovery_size) );
   140     /* Write the recovery records onto the end of the code block */
   141     memcpy( xlat_output, xlat_recovery, recovery_size);
   142     xlat_current_block->recover_table_offset = xlat_output - (uint8_t *)xlat_current_block->code;
   143     xlat_current_block->recover_table_size = xlat_recovery_posn;
   144     xlat_current_block->xlat_sh4_mode = sh4r.xlat_sh4_mode;
   145     xlat_commit_block( finalsize, pc-start );
   146     return xlat_current_block->code;
   147 }
   149 /**
   150  * "Execute" the supplied recovery record. Currently this only updates
   151  * sh4r.pc and sh4r.slice_cycle according to the currently executing
   152  * instruction. In future this may be more sophisticated (ie will
   153  * call into generated code).
   154  */
   155 void sh4_translate_run_recovery( xlat_recovery_record_t recovery )
   156 {
   157     sh4r.slice_cycle += (recovery->sh4_icount * sh4_cpu_period);
   158     sh4r.pc += (recovery->sh4_icount<<1);
   159 }
   161 void sh4_translate_exit_recover( )
   162 {
   163     void *code = xlat_get_code_by_vma( sh4r.pc );
   164     if( code != NULL ) {
   165         uint32_t size = xlat_get_code_size( code );
   166         void *pc = xlat_get_native_pc( code, size );
   167         if( pc != NULL ) {
   168             // could be null if we're not actually running inside the translator
   169             xlat_recovery_record_t recover = xlat_get_post_recovery(code, pc, TRUE);
   170             if( recover != NULL ) {
   171                 // Can be null if there is no recovery necessary
   172                 sh4_translate_run_recovery(recover);
   173             }
   174         }
   175     }
   176 }
   178 void FASTCALL sh4_translate_breakpoint_hit(uint32_t pc)
   179 {
   180     if( sh4_starting && sh4r.slice_cycle == 0 && pc == sh4r.pc ) {
   181         return;
   182     }
   183     sh4_core_exit( CORE_EXIT_BREAKPOINT );
   184 }
    186 /**
    187  * Exit the current block at the end of the current instruction, flush the
    188  * translation cache (completely) and return control to sh4_translate_run_slice.
    189  *
    190  * As a special case, if the current instruction is actually the last 
    191  * instruction in the block (ie it's in a delay slot), this function 
    192  * returns to allow normal completion of the translation block. Otherwise
    193  * this function never returns.
    194  *
    195  * Must only be invoked (indirectly) from within translated code.
    196  */
   197 gboolean sh4_translate_flush_cache()
   198 {
   199     void *code = xlat_get_code_by_vma( sh4r.pc );
   200     if( code != NULL ) {
   201         uint32_t size = xlat_get_code_size( code );
   202         void *pc = xlat_get_native_pc( code, size );
   203         assert( pc != NULL );
   205         xlat_recovery_record_t recover = xlat_get_post_recovery(code, pc, FALSE);
   206         if( recover != NULL ) {
   207             // Can be null if there is no recovery necessary
   208             sh4_translate_run_recovery(recover);
   209             xlat_flush_cache();
   210             return TRUE;
   211         } else {
   212             xlat_flush_cache();
   213             return FALSE;
   214         }
   215     }
   216 }
   218 void * FASTCALL xlat_get_code_by_vma( sh4vma_t vma )
   219 {
   220     void *result = NULL;
   222     if( IS_IN_ICACHE(vma) ) {
   223         return xlat_get_code( GET_ICACHE_PHYS(vma) );
   224     }
   226     if( vma > 0xFFFFFF00 ) {
   227         // lxdream hook
   228         return NULL;
   229     }
   231     if( !mmu_update_icache(vma) ) {
   232         // fault - off to the fault handler
   233         if( !mmu_update_icache(sh4r.pc) ) {
   234             // double fault - halt
   235             ERROR( "Double fault - halting" );
   236             sh4_core_exit(CORE_EXIT_HALT);
   237             return NULL;
   238         }
   239     }
   241     assert( IS_IN_ICACHE(sh4r.pc) );
   242     result = xlat_get_code( GET_ICACHE_PHYS(sh4r.pc) );
   243     return result;
   244 }
.