Search
lxdream.org :: lxdream/src/sh4/sh4trans.c
lxdream 0.9.1
released Jun 29
Download Now
filename src/sh4/sh4trans.c
changeset 1065:bc1cc0c54917
prev1014:f5914b2fd0db
prev585:371342a39c09
next1067:d3c00ffccfcd
author nkeynes
date Sun Jul 05 13:52:50 2009 +1000 (12 years ago)
permissions -rw-r--r--
last change No-op merge lxdream-mmu to remove head (actually merged long ago)
view annotate diff log raw
     1 /**
     2  * $Id$
     3  * 
     4  * SH4 translation core module. This part handles the non-target-specific
     5  * section of the translation.
     6  *
     7  * Copyright (c) 2005 Nathan Keynes.
     8  *
     9  * This program is free software; you can redistribute it and/or modify
    10  * it under the terms of the GNU General Public License as published by
    11  * the Free Software Foundation; either version 2 of the License, or
    12  * (at your option) any later version.
    13  *
    14  * This program is distributed in the hope that it will be useful,
    15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    17  * GNU General Public License for more details.
    18  */
    19 #include <assert.h>
    20 #include "eventq.h"
    21 #include "syscall.h"
    22 #include "clock.h"
    23 #include "dreamcast.h"
    24 #include "sh4/sh4core.h"
    25 #include "sh4/sh4trans.h"
    26 #include "sh4/sh4mmio.h"
    27 #include "sh4/mmu.h"
    28 #include "xlat/xltcache.h"
    30 /**
    31  * Execute a timeslice using translated code only (ie translate/execute loop)
    32  */
uint32_t sh4_translate_run_slice( uint32_t nanosecs ) 
{
    /* Cached continuation: the next translated block to execute, or NULL
     * when it must be (re)looked up from sh4r.pc. */
    void * (*code)() = NULL;
    while( sh4r.slice_cycle < nanosecs ) {
        /* Service any events that have come due within this slice */
        if( sh4r.event_pending <= sh4r.slice_cycle ) {
            if( sh4r.event_types & PENDING_EVENT ) {
                event_execute();
            }
            /* Eventq execute may (quite likely) deliver an immediate IRQ */
            if( sh4r.event_types & PENDING_IRQ ) {
                sh4_accept_interrupt();
                /* PC now points at the interrupt handler - drop the cached block */
                code = NULL;
            }
        }

        if( code == NULL ) {
            /* lxdream syscall hook addresses are intercepted rather than translated */
            if( IS_SYSCALL(sh4r.pc) ) {
                syscall_invoke( sh4r.pc );
                sh4r.in_delay_slot = 0;
                sh4r.pc = sh4r.pr;
            }

            code = xlat_get_code_by_vma( sh4r.pc );
            /* Translate if there is no cached code, or the cached code was
             * generated under a different SH4 execution mode */
            if( code == NULL || sh4r.xlat_sh4_mode != XLAT_BLOCK_MODE(code) ) {
                code = sh4_translate_basic_block( sh4r.pc );
            }
        } else if( sh4r.xlat_sh4_mode != XLAT_BLOCK_MODE(code) ) {
            /* Continuation block exists but was built for a different mode -
             * retranslate under the current mode */
            if( !IS_IN_ICACHE(sh4r.pc) ) {
                /* If TLB is off, we may have gotten here without updating
                 * the icache, so do it now. This should never fail, so...
                 */
                mmu_update_icache(sh4r.pc);
                assert( IS_IN_ICACHE(sh4r.pc) ); 
            }
            code = sh4_translate_basic_block( sh4r.pc );
        }
        /* Run the block; its return value is used as the next block (the
         * loop re-resolves from sh4r.pc when it returns NULL) */
        code = code();
    }
    return nanosecs;
}
uint8_t *xlat_output;                  /* Current write position in the output code buffer */
xlat_cache_block_t xlat_current_block; /* Cache block currently being translated into */
/* Recovery records accumulated while translating the current block; they are
 * appended to the code block on commit (see sh4_translate_basic_block) */
struct xlat_recovery_record xlat_recovery[MAX_RECOVERY_SIZE];
uint32_t xlat_recovery_posn;           /* Number of entries used in xlat_recovery */
    79 void sh4_translate_add_recovery( uint32_t icount )
    80 {
    81     xlat_recovery[xlat_recovery_posn].xlat_offset = 
    82         ((uintptr_t)xlat_output) - ((uintptr_t)xlat_current_block->code);
    83     xlat_recovery[xlat_recovery_posn].sh4_icount = icount;
    84     xlat_recovery_posn++;
    85 }
    87 /**
    88  * Translate a linear basic block, ie all instructions from the start address
    89  * (inclusive) until the next branch/jump instruction or the end of the page
    90  * is reached.
    91  * @param start VMA of the block start (which must already be in the icache)
     92  * @return the address of the translated block, or NULL if translation
     93  * failed, eg due to lack of buffer space.
    94  */
    95 void * sh4_translate_basic_block( sh4addr_t start )
    96 {
    97     sh4addr_t pc = start;
    98     sh4addr_t lastpc = (pc&0xFFFFF000)+0x1000;
    99     int done, i;
   100     xlat_current_block = xlat_start_block( GET_ICACHE_PHYS(start) );
   101     xlat_output = (uint8_t *)xlat_current_block->code;
   102     xlat_recovery_posn = 0;
   103     uint8_t *eob = xlat_output + xlat_current_block->size;
   105     if( GET_ICACHE_END() < lastpc ) {
   106         lastpc = GET_ICACHE_END();
   107     }
   109     sh4_translate_begin_block(pc);
   111     do {
   112         if( eob - xlat_output < MAX_INSTRUCTION_SIZE ) {
   113             uint8_t *oldstart = xlat_current_block->code;
   114             xlat_current_block = xlat_extend_block( xlat_output - oldstart + MAX_INSTRUCTION_SIZE );
   115             xlat_output = xlat_current_block->code + (xlat_output - oldstart);
   116             eob = xlat_current_block->code + xlat_current_block->size;
   117         }
   118         done = sh4_translate_instruction( pc ); 
   119         assert( xlat_output <= eob );
   120         pc += 2;
   121         if ( pc >= lastpc ) {
   122             done = 2;
   123         }
   124     } while( !done );
   125     pc += (done - 2);
   127     // Add end-of-block recovery for post-instruction checks
   128     sh4_translate_add_recovery( (pc - start)>>1 ); 
   130     int epilogue_size = sh4_translate_end_block_size();
   131     uint32_t recovery_size = sizeof(struct xlat_recovery_record)*xlat_recovery_posn;
   132     uint32_t finalsize = (xlat_output - xlat_current_block->code) + epilogue_size + recovery_size;
   133     if( xlat_current_block->size < finalsize ) {
   134         uint8_t *oldstart = xlat_current_block->code;
   135         xlat_current_block = xlat_extend_block( finalsize );
   136         xlat_output = xlat_current_block->code + (xlat_output - oldstart);
   137     }	
   138     sh4_translate_end_block(pc);
   139     assert( xlat_output <= (xlat_current_block->code + xlat_current_block->size - recovery_size) );
   141     /* Write the recovery records onto the end of the code block */
   142     memcpy( xlat_output, xlat_recovery, recovery_size);
   143     xlat_current_block->recover_table_offset = xlat_output - (uint8_t *)xlat_current_block->code;
   144     xlat_current_block->recover_table_size = xlat_recovery_posn;
   145     xlat_current_block->xlat_sh4_mode = sh4r.xlat_sh4_mode;
   146     xlat_commit_block( finalsize, pc-start );
   147     return xlat_current_block->code;
   148 }
   150 /**
   151  * "Execute" the supplied recovery record. Currently this only updates
   152  * sh4r.pc and sh4r.slice_cycle according to the currently executing
   153  * instruction. In future this may be more sophisticated (ie will
   154  * call into generated code).
   155  */
   156 void sh4_translate_run_recovery( xlat_recovery_record_t recovery )
   157 {
   158     sh4r.slice_cycle += (recovery->sh4_icount * sh4_cpu_period);
   159     sh4r.pc += (recovery->sh4_icount<<1);
   160 }
   162 /**
   163  * Same as sh4_translate_run_recovery, but is used to recover from a taken
   164  * exception - that is, it fixes sh4r.spc rather than sh4r.pc
   165  */
   166 void sh4_translate_run_exception_recovery( xlat_recovery_record_t recovery )
   167 {
   168     sh4r.slice_cycle += (recovery->sh4_icount * sh4_cpu_period);
   169     sh4r.spc += (recovery->sh4_icount<<1);
   170 }    
   172 void sh4_translate_exit_recover( )
   173 {
   174     void *code = xlat_get_code_by_vma( sh4r.pc );
   175     if( code != NULL ) {
   176         uint32_t size = xlat_get_code_size( code );
   177         void *pc = xlat_get_native_pc( code, size );
   178         if( pc != NULL ) {
   179             // could be null if we're not actually running inside the translator
   180             xlat_recovery_record_t recover = xlat_get_pre_recovery(code, pc);
   181             if( recover != NULL ) {
   182                 // Can be null if there is no recovery necessary
   183                 sh4_translate_run_recovery(recover);
   184             }
   185         }
   186     }
   187 }
   189 void sh4_translate_exception_exit_recover( )
   190 {
   191     void *code = xlat_get_code_by_vma( sh4r.spc );
   192     if( code != NULL ) {
   193         uint32_t size = xlat_get_code_size( code );
   194         void *pc = xlat_get_native_pc( code, size );
   195         if( pc != NULL ) {
   196             // could be null if we're not actually running inside the translator
   197             xlat_recovery_record_t recover = xlat_get_pre_recovery(code, pc);
   198             if( recover != NULL ) {
   199                 // Can be null if there is no recovery necessary
   200                 sh4_translate_run_exception_recovery(recover);
   201             }
   202         }
   203     }
   205 }
   207 void FASTCALL sh4_translate_breakpoint_hit(uint32_t pc)
   208 {
   209     if( sh4_starting && sh4r.slice_cycle == 0 && pc == sh4r.pc ) {
   210         return;
   211     }
   212     sh4_core_exit( CORE_EXIT_BREAKPOINT );
   213 }
   215 void * FASTCALL xlat_get_code_by_vma( sh4vma_t vma )
   216 {
   217     void *result = NULL;
   219     if( IS_IN_ICACHE(vma) ) {
   220         return xlat_get_code( GET_ICACHE_PHYS(vma) );
   221     }
   223     if( IS_SYSCALL(vma) ) {
   224         // lxdream hook
   225         return NULL;
   226     }
   228     if( !mmu_update_icache(vma) ) {
   229         // fault - off to the fault handler
   230         if( !mmu_update_icache(sh4r.pc) ) {
   231             // double fault - halt
   232             ERROR( "Double fault - halting" );
   233             sh4_core_exit(CORE_EXIT_HALT);
   234             return NULL;
   235         }
   236     }
   238     assert( IS_IN_ICACHE(sh4r.pc) );
   239     result = xlat_get_code( GET_ICACHE_PHYS(sh4r.pc) );
   240     return result;
   241 }
.