filename    src/sh4/sh4trans.c
changeset   1125:9dd5dee45db9
prev        1112:4cac5e474d4c
next        1149:da6124fceec6
author      nkeynes
date        Fri Sep 17 20:04:02 2010 +1000
permissions -rw-r--r--
last change Add missing shadow.c

/**
 * $Id$
 *
 * SH4 translation core module. This part handles the non-target-specific
 * section of the translation.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <assert.h>
#include "eventq.h"
#include "syscall.h"
#include "clock.h"
#include "dreamcast.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "sh4/sh4mmio.h"
#include "sh4/mmu.h"
#include "xlat/xltcache.h"

//#define SINGLESTEP 1

/**
 * Execute a timeslice using translated code only (ie translate/execute loop)
 */
uint32_t sh4_translate_run_slice( uint32_t nanosecs )
{
    void * (*code)() = NULL;
    event_schedule( EVENT_ENDTIMESLICE, nanosecs );
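    /* Main translate/execute loop: drain any pending events, then look up
     * (or translate) the block at the current PC and jump into it. */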
    for(;;) {
        if( sh4r.event_pending <= sh4r.slice_cycle ) {
            if( sh4r.event_types & PENDING_EVENT ) {
                event_execute();
            }
            /* Eventq execute may (quite likely) deliver an immediate IRQ */
            if( sh4r.event_types & PENDING_IRQ ) {
                sh4_accept_interrupt();
                code = NULL;
            }
            if( sh4r.slice_cycle >= nanosecs )
                return nanosecs;
        }

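        /* PC sitting on an lxdream syscall hook: return to the caller (PR)
         * and invoke the host-side handler registered for the hooked address. */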
        if( IS_SYSCALL(sh4r.pc) ) {
            uint32_t pc = sh4r.pc;
            sh4r.pc = sh4r.pr;
            sh4r.in_delay_slot = 0;
            syscall_invoke( pc );
        }

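        /* Look up the translated block for the current PC, retranslating if
         * it is missing or was generated for a different SH4 execution mode. */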
        code = xlat_get_code_by_vma( sh4r.pc );
        if( code == NULL || sh4r.xlat_sh4_mode != XLAT_BLOCK_MODE(code) ) {
            code = sh4_translate_basic_block( sh4r.pc );
        }
        code();
    }
}

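/* Translation state shared with the target code generator: the current output
 * position, the block under construction, and its accumulated recovery records.
 */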
uint8_t *xlat_output;
xlat_cache_block_t xlat_current_block;
struct xlat_recovery_record xlat_recovery[MAX_RECOVERY_SIZE];
uint32_t xlat_recovery_posn;

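/**
 * Record a recovery entry mapping the current output position in the block
 * under construction back to the instruction count within the block, so that
 * sh4r.pc and sh4r.slice_cycle can be reconstructed if execution stops there.
 */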
void sh4_translate_add_recovery( uint32_t icount )
{
    xlat_recovery[xlat_recovery_posn].xlat_offset =
        ((uintptr_t)xlat_output) - ((uintptr_t)xlat_current_block->code);
    xlat_recovery[xlat_recovery_posn].sh4_icount = icount;
    xlat_recovery_posn++;
}

/**
 * Translate a linear basic block, ie all instructions from the start address
 * (inclusive) until the next branch/jump instruction or the end of the page
 * is reached.
 * @param start VMA of the block start (which must already be in the icache)
 * @return the address of the translated block.
 */
void * sh4_translate_basic_block( sh4addr_t start )
{
    sh4addr_t pc = start;
    sh4addr_t lastpc = (pc&0xFFFFF000)+0x1000;
    int done, i;
    xlat_current_block = xlat_start_block( GET_ICACHE_PHYS(start) );
    xlat_output = (uint8_t *)xlat_current_block->code;
    xlat_recovery_posn = 0;
    uint8_t *eob = xlat_output + xlat_current_block->size;

    if( GET_ICACHE_END() < lastpc ) {
        lastpc = GET_ICACHE_END();
    }

    sh4_translate_begin_block(pc);

    do {
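        /* Make sure there is room for at least one more translated
         * instruction in the output buffer, extending the block if needed. */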
        if( eob - xlat_output < MAX_INSTRUCTION_SIZE ) {
            uint8_t *oldstart = xlat_current_block->code;
            xlat_current_block = xlat_extend_block( xlat_output - oldstart + MAX_INSTRUCTION_SIZE );
            xlat_output = xlat_current_block->code + (xlat_output - oldstart);
            eob = xlat_current_block->code + xlat_current_block->size;
        }
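        /* Translate the next instruction. A zero result means the block
         * continues; a non-zero result terminates it and feeds into the
         * final PC adjustment below. */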
        done = sh4_translate_instruction( pc );
        assert( xlat_output <= eob );
        pc += 2;
        if ( pc >= lastpc ) {
            done = 2;
        }
#ifdef SINGLESTEP
        if( !done ) done = 2;
#endif
    } while( !done );
    pc += (done - 2);

    // Add end-of-block recovery for post-instruction checks
    sh4_translate_add_recovery( (pc - start)>>1 );

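    // Make sure the block is large enough for the epilogue plus the recovery
    // table that is appended after the generated code.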
    int epilogue_size = sh4_translate_end_block_size();
    uint32_t recovery_size = sizeof(struct xlat_recovery_record)*xlat_recovery_posn;
    uint32_t finalsize = (xlat_output - xlat_current_block->code) + epilogue_size + recovery_size;
    if( xlat_current_block->size < finalsize ) {
        uint8_t *oldstart = xlat_current_block->code;
        xlat_current_block = xlat_extend_block( finalsize );
        xlat_output = xlat_current_block->code + (xlat_output - oldstart);
    }
    sh4_translate_end_block(pc);
    assert( xlat_output <= (xlat_current_block->code + xlat_current_block->size - recovery_size) );

    /* Write the recovery records onto the end of the code block */
    memcpy( xlat_output, xlat_recovery, recovery_size);
    xlat_current_block->recover_table_offset = xlat_output - (uint8_t *)xlat_current_block->code;
    xlat_current_block->recover_table_size = xlat_recovery_posn;
    xlat_current_block->xlat_sh4_mode = sh4r.xlat_sh4_mode;
    xlat_commit_block( finalsize, pc-start );
    return xlat_current_block->code;
}

/**
 * "Execute" the supplied recovery record. Currently this only updates
 * sh4r.pc and sh4r.slice_cycle according to the currently executing
 * instruction. In future this may be more sophisticated (ie will
 * call into generated code).
 */
void sh4_translate_run_recovery( xlat_recovery_record_t recovery )
{
    sh4r.slice_cycle += (recovery->sh4_icount * sh4_cpu_period);
    sh4r.pc += (recovery->sh4_icount<<1);
}

/**
 * Same as sh4_translate_run_recovery, but is used to recover from a taken
 * exception - that is, it fixes sh4r.spc rather than sh4r.pc
 */
void sh4_translate_run_exception_recovery( xlat_recovery_record_t recovery )
{
    sh4r.slice_cycle += (recovery->sh4_icount * sh4_cpu_period);
    sh4r.spc += (recovery->sh4_icount<<1);
}

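/**
 * Recover the SH4 state after exiting from the middle of a translated block:
 * if the current native PC falls within the block for sh4r.pc, apply its
 * pre-instruction recovery record to bring sh4r.pc and sh4r.slice_cycle
 * up to date.
 */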
void sh4_translate_exit_recover( )
{
    void *code = xlat_get_code_by_vma( sh4r.pc );
    if( code != NULL ) {
        uint32_t size = xlat_get_code_size( code );
        void *pc = xlat_get_native_pc( code, size );
        if( pc != NULL ) {
            // could be null if we're not actually running inside the translator
            xlat_recovery_record_t recover = xlat_get_pre_recovery(code, pc);
            if( recover != NULL ) {
                // Can be null if there is no recovery necessary
                sh4_translate_run_recovery(recover);
            }
        }
    }
}

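/**
 * As sh4_translate_exit_recover(), but for an exit taken through an SH4
 * exception: the lookup uses sh4r.spc and the recovery fixes up spc.
 */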
void sh4_translate_exception_exit_recover( )
{
    void *code = xlat_get_code_by_vma( sh4r.spc );
    if( code != NULL ) {
        uint32_t size = xlat_get_code_size( code );
        void *pc = xlat_get_native_pc( code, size );
        if( pc != NULL ) {
            // could be null if we're not actually running inside the translator
            xlat_recovery_record_t recover = xlat_get_pre_recovery(code, pc);
            if( recover != NULL ) {
                // Can be null if there is no recovery necessary
                sh4_translate_run_exception_recovery(recover);
            }
        }
    }
}

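/**
 * Called from translated code when a breakpoint is hit. The hit is ignored if
 * the core is just starting, no cycles have elapsed and the breakpoint is on
 * the current PC; otherwise the core exits with CORE_EXIT_BREAKPOINT.
 */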
void FASTCALL sh4_translate_breakpoint_hit(uint32_t pc)
{
    if( sh4_starting && sh4r.slice_cycle == 0 && pc == sh4r.pc ) {
        return;
    }
    sh4_core_exit( CORE_EXIT_BREAKPOINT );
}

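/**
 * Look up the translated code block for the given virtual address. Addresses
 * outside the current icache region are first mapped through the MMU; syscall
 * hook addresses return NULL, and a double fault while updating the icache
 * halts the core.
 */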
void * FASTCALL xlat_get_code_by_vma( sh4vma_t vma )
{
    void *result = NULL;

    if( IS_IN_ICACHE(vma) ) {
        return xlat_get_code( GET_ICACHE_PHYS(vma) );
    }

    if( IS_SYSCALL(vma) ) {
        // lxdream hook
        return NULL;
    }

    if( !mmu_update_icache(vma) ) {
        // fault - off to the fault handler
        if( !mmu_update_icache(sh4r.pc) ) {
            // double fault - halt
            ERROR( "Double fault - halting" );
            sh4_core_exit(CORE_EXIT_HALT);
            return NULL;
        }
    }

    assert( IS_IN_ICACHE(sh4r.pc) );
    result = xlat_get_code( GET_ICACHE_PHYS(sh4r.pc) );
    return result;
}

/**
 * Crashdump translation information.
 *
 * Print out the currently executing block (if any), in source and target
 * assembly.
 *
 * Note: we want to be _really_ careful not to cause a second-level crash
 * at this point (e.g. if the lookup tables are corrupted...)
 */
void sh4_translate_crashdump()
{
    if( !IS_IN_ICACHE(sh4r.pc) ) {
        /** If we're crashing due to an icache lookup failure, we'll probably
         * hit this case - just complain and return.
         */
        fprintf( stderr, "** SH4 PC not in current instruction region **\n" );
        return;
    }
    uint32_t pma = GET_ICACHE_PHYS(sh4r.pc);
    void *code = xlat_get_code( pma );
    if( code == NULL ) {
        fprintf( stderr, "** No translated block for current SH4 PC **\n" );
        return;
    }

    /* Sanity check on the code pointer */
    if( !xlat_is_code_pointer(code) ) {
        fprintf( stderr, "** Possibly corrupt translation cache **\n" );
        return;
    }

    void *native_pc = xlat_get_native_pc( code, xlat_get_code_size(code) );
    sh4_translate_disasm_block( stderr, code, sh4r.pc, native_pc );
}

/**
 * Dual-dump the translated block and original SH4 code for the basic block
 * starting at sh4_pc. If there is no translated block, this prints an error
 * and returns.
 */
void sh4_translate_dump_block( uint32_t sh4_pc )
{
    if( !IS_IN_ICACHE(sh4_pc) ) {
        fprintf( stderr, "** Address %08x not in current instruction region **\n", sh4_pc );
        return;
    }
    uint32_t pma = GET_ICACHE_PHYS(sh4_pc);
    void *code = xlat_get_code( pma );
    if( code == NULL ) {
        fprintf( stderr, "** No translated block for address %08x **\n", sh4_pc );
        return;
    }
    sh4_translate_disasm_block( stderr, code, sh4_pc, NULL );
}