lxdream.org :: lxdream/src/sh4/sh4trans.c
filename     src/sh4/sh4trans.c
changeset    1112:4cac5e474d4c
prev         1103:de9ad2c0cf56
next         1125:9dd5dee45db9
author       nkeynes
date         Fri Sep 10 08:50:55 2010 +1000 (13 years ago)
permissions  -rw-r--r--
last change  Add missing sh4_translate_breakpoint_hit to the symbol table;
             Change asm() to __asm__() as it's more likely to work
/**
 * $Id$
 *
 * SH4 translation core module. This part handles the non-target-specific
 * section of the translation.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <assert.h>
#include <stdio.h>   /* fprintf - used by the dump routines below */
#include <string.h>  /* memcpy - used when appending recovery records */
#include "eventq.h"
#include "syscall.h"
#include "clock.h"
#include "dreamcast.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "sh4/sh4mmio.h"
#include "sh4/mmu.h"
#include "xlat/xltcache.h"

/**
 * Execute a timeslice using translated code only (ie translate/execute loop)
 */
uint32_t sh4_translate_run_slice( uint32_t nanosecs )
{
    void * (*code)() = NULL;
    event_schedule( EVENT_ENDTIMESLICE, nanosecs );
    for(;;) {
        if( sh4r.event_pending <= sh4r.slice_cycle ) {
            if( sh4r.event_types & PENDING_EVENT ) {
                event_execute();
            }
            /* Eventq execute may (quite likely) deliver an immediate IRQ */
            if( sh4r.event_types & PENDING_IRQ ) {
                sh4_accept_interrupt();
                code = NULL;
            }
            if( sh4r.slice_cycle >= nanosecs )
                return nanosecs;
        }

        if( IS_SYSCALL(sh4r.pc) ) {
            uint32_t pc = sh4r.pc;
            sh4r.pc = sh4r.pr;
            sh4r.in_delay_slot = 0;
            syscall_invoke( pc );
        }

        code = xlat_get_code_by_vma( sh4r.pc );
        if( code == NULL || sh4r.xlat_sh4_mode != XLAT_BLOCK_MODE(code) ) {
            code = sh4_translate_basic_block( sh4r.pc );
        }
        code();
    }
}
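
/* Editor's note: a minimal usage sketch, not part of the original file. It
 * illustrates how a driver loop might invoke sh4_translate_run_slice() once
 * per timeslice. The slice length and the surrounding loop are assumptions,
 * not lxdream's actual run loop.
 */
#if 0
static void example_run_loop( void )
{
    const uint32_t SLICE_NS = 1000000;   /* hypothetical 1ms timeslice */
    for(;;) {
        /* Translate/execute until the slice's nanoseconds are consumed */
        uint32_t used = sh4_translate_run_slice( SLICE_NS );
        /* A real driver would advance peripheral clocks by 'used' here */
        (void)used;
    }
}
#endif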
uint8_t *xlat_output;
xlat_cache_block_t xlat_current_block;
struct xlat_recovery_record xlat_recovery[MAX_RECOVERY_SIZE];
uint32_t xlat_recovery_posn;

void sh4_translate_add_recovery( uint32_t icount )
{
    xlat_recovery[xlat_recovery_posn].xlat_offset =
        ((uintptr_t)xlat_output) - ((uintptr_t)xlat_current_block->code);
    xlat_recovery[xlat_recovery_posn].sh4_icount = icount;
    xlat_recovery_posn++;
}

/**
 * Translate a linear basic block, ie all instructions from the start address
 * (inclusive) until the next branch/jump instruction or the end of the page
 * is reached.
 * @param start VMA of the block start (which must already be in the icache)
 * @return the address of the translated block. Note that the block may have
 * been relocated during translation, eg due to lack of buffer space.
 */
void * sh4_translate_basic_block( sh4addr_t start )
{
    sh4addr_t pc = start;
    sh4addr_t lastpc = (pc&0xFFFFF000)+0x1000;
    int done, i;
    xlat_current_block = xlat_start_block( GET_ICACHE_PHYS(start) );
    xlat_output = (uint8_t *)xlat_current_block->code;
    xlat_recovery_posn = 0;
    uint8_t *eob = xlat_output + xlat_current_block->size;

    if( GET_ICACHE_END() < lastpc ) {
        lastpc = GET_ICACHE_END();
    }

    sh4_translate_begin_block(pc);

    do {
        if( eob - xlat_output < MAX_INSTRUCTION_SIZE ) {
            uint8_t *oldstart = xlat_current_block->code;
            xlat_current_block = xlat_extend_block( xlat_output - oldstart + MAX_INSTRUCTION_SIZE );
            xlat_output = xlat_current_block->code + (xlat_output - oldstart);
            eob = xlat_current_block->code + xlat_current_block->size;
        }
        done = sh4_translate_instruction( pc );
        assert( xlat_output <= eob );
        pc += 2;
        if ( pc >= lastpc ) {
            done = 2;
        }
    } while( !done );
    pc += (done - 2);

    // Add end-of-block recovery for post-instruction checks
    sh4_translate_add_recovery( (pc - start)>>1 );

    int epilogue_size = sh4_translate_end_block_size();
    uint32_t recovery_size = sizeof(struct xlat_recovery_record)*xlat_recovery_posn;
    uint32_t finalsize = (xlat_output - xlat_current_block->code) + epilogue_size + recovery_size;
    if( xlat_current_block->size < finalsize ) {
        uint8_t *oldstart = xlat_current_block->code;
        xlat_current_block = xlat_extend_block( finalsize );
        xlat_output = xlat_current_block->code + (xlat_output - oldstart);
    }
    sh4_translate_end_block(pc);
    assert( xlat_output <= (xlat_current_block->code + xlat_current_block->size - recovery_size) );

    /* Write the recovery records onto the end of the code block */
    memcpy( xlat_output, xlat_recovery, recovery_size);
    xlat_current_block->recover_table_offset = xlat_output - (uint8_t *)xlat_current_block->code;
    xlat_current_block->recover_table_size = xlat_recovery_posn;
    xlat_current_block->xlat_sh4_mode = sh4r.xlat_sh4_mode;
    xlat_commit_block( finalsize, pc-start );
    return xlat_current_block->code;
}
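
/* Editor's note: an illustrative sketch, not in the original file, of the
 * block layout produced above: translated code is followed by the epilogue
 * and then by the recovery records, located via recover_table_offset/size.
 * The field names are taken from this file; everything else (the helper name,
 * the exact xlat_cache_block_t layout) is an assumption.
 */
#if 0
static void example_walk_recovery_table( xlat_cache_block_t block )
{
    struct xlat_recovery_record *table = (struct xlat_recovery_record *)
        ((uint8_t *)block->code + block->recover_table_offset);
    uint32_t i;
    for( i = 0; i < block->recover_table_size; i++ ) {
        /* table[i].xlat_offset is the offset into block->code at which the
         * record applies; table[i].sh4_icount is the number of SH4
         * instructions completed at that point. */
    }
}
#endif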
/**
 * "Execute" the supplied recovery record. Currently this only updates
 * sh4r.pc and sh4r.slice_cycle according to the currently executing
 * instruction. In future this may be more sophisticated (ie will
 * call into generated code).
 */
void sh4_translate_run_recovery( xlat_recovery_record_t recovery )
{
    sh4r.slice_cycle += (recovery->sh4_icount * sh4_cpu_period);
    sh4r.pc += (recovery->sh4_icount<<1);
}

/**
 * Same as sh4_translate_run_recovery, but is used to recover from a taken
 * exception - that is, it fixes sh4r.spc rather than sh4r.pc
 */
void sh4_translate_run_exception_recovery( xlat_recovery_record_t recovery )
{
    sh4r.slice_cycle += (recovery->sh4_icount * sh4_cpu_period);
    sh4r.spc += (recovery->sh4_icount<<1);
}
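
/* Editor's note (not in the original): as a worked example of the arithmetic
 * above, a record with sh4_icount == 3 advances spc (or pc) by 6 bytes, since
 * SH4 instructions are 2 bytes each, and advances slice_cycle by
 * 3 * sh4_cpu_period.
 */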
void sh4_translate_exit_recover( )
{
    void *code = xlat_get_code_by_vma( sh4r.pc );
    if( code != NULL ) {
        uint32_t size = xlat_get_code_size( code );
        void *pc = xlat_get_native_pc( code, size );
        if( pc != NULL ) {
            // could be null if we're not actually running inside the translator
            xlat_recovery_record_t recover = xlat_get_pre_recovery(code, pc);
            if( recover != NULL ) {
                // Can be null if there is no recovery necessary
                sh4_translate_run_recovery(recover);
            }
        }
    }
}

void sh4_translate_exception_exit_recover( )
{
    void *code = xlat_get_code_by_vma( sh4r.spc );
    if( code != NULL ) {
        uint32_t size = xlat_get_code_size( code );
        void *pc = xlat_get_native_pc( code, size );
        if( pc != NULL ) {
            // could be null if we're not actually running inside the translator
            xlat_recovery_record_t recover = xlat_get_pre_recovery(code, pc);
            if( recover != NULL ) {
                // Can be null if there is no recovery necessary
                sh4_translate_run_exception_recovery(recover);
            }
        }
    }
}

void FASTCALL sh4_translate_breakpoint_hit(uint32_t pc)
{
    if( sh4_starting && sh4r.slice_cycle == 0 && pc == sh4r.pc ) {
        return;
    }
    sh4_core_exit( CORE_EXIT_BREAKPOINT );
}

void * FASTCALL xlat_get_code_by_vma( sh4vma_t vma )
{
    void *result = NULL;

    if( IS_IN_ICACHE(vma) ) {
        return xlat_get_code( GET_ICACHE_PHYS(vma) );
    }

    if( IS_SYSCALL(vma) ) {
        // lxdream hook
        return NULL;
    }

    if( !mmu_update_icache(vma) ) {
        // fault - off to the fault handler
        if( !mmu_update_icache(sh4r.pc) ) {
            // double fault - halt
            ERROR( "Double fault - halting" );
            sh4_core_exit(CORE_EXIT_HALT);
            return NULL;
        }
    }

    assert( IS_IN_ICACHE(sh4r.pc) );
    result = xlat_get_code( GET_ICACHE_PHYS(sh4r.pc) );
    return result;
}

/**
 * Crashdump translation information.
 *
 * Print out the currently executing block (if any), in source and target
 * assembly.
 *
 * Note: we want to be _really_ careful not to cause a second-level crash
 * at this point (e.g. if the lookup tables are corrupted...)
 */
void sh4_translate_crashdump()
{
    if( !IS_IN_ICACHE(sh4r.pc) ) {
        /** If we're crashing due to an icache lookup failure, we'll probably
         * hit this case - just complain and return.
         */
        fprintf( stderr, "** SH4 PC not in current instruction region **\n" );
        return;
    }
    uint32_t pma = GET_ICACHE_PHYS(sh4r.pc);
    void *code = xlat_get_code( pma );
    if( code == NULL ) {
        fprintf( stderr, "** No translated block for current SH4 PC **\n" );
        return;
    }

    /* Sanity check on the code pointer */
    if( !xlat_is_code_pointer(code) ) {
        fprintf( stderr, "** Possibly corrupt translation cache **\n" );
        return;
    }

    void *native_pc = xlat_get_native_pc( code, xlat_get_code_size(code) );
    sh4_translate_disasm_block( stderr, code, sh4r.pc, native_pc );
}

/**
 * Dual-dump the translated block and original SH4 code for the basic block
 * starting at sh4_pc. If there is no translated block, this prints an error
 * and returns.
 */
void sh4_translate_dump_block( uint32_t sh4_pc )
{
    if( !IS_IN_ICACHE(sh4_pc) ) {
        fprintf( stderr, "** Address %08x not in current instruction region **\n", sh4_pc );
        return;
    }
    uint32_t pma = GET_ICACHE_PHYS(sh4_pc);
    void *code = xlat_get_code( pma );
    if( code == NULL ) {
        fprintf( stderr, "** No translated block for address %08x **\n", sh4_pc );
        return;
    }
    sh4_translate_disasm_block( stderr, code, sh4_pc, NULL );
}
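
/* Editor's note: a hypothetical debugging snippet, not part of the original
 * file, showing how the two dump helpers above might be called; the
 * breakpoint/crash-handler context is an assumption.
 */
#if 0
static void example_debug_dump( void )
{
    /* Dump the block containing the current PC, plus crash context */
    sh4_translate_dump_block( sh4r.pc );
    sh4_translate_crashdump();
}
#endif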