Search
lxdream.org :: lxdream/src/sh4/sh4x86.in :: diff
lxdream 0.9.1
released Jun 29
Download Now
filename src/sh4/sh4x86.in
changeset 1125:9dd5dee45db9
prev 1120:7c40a0f687b3
next 1146:76c5d1064262
author nkeynes
date Fri Sep 17 20:04:02 2010 +1000 (13 years ago)
permissions -rw-r--r--
last change Add missing shadow.c
file annotate diff log raw
1.1 --- a/src/sh4/sh4x86.in Fri Sep 10 08:50:55 2010 +1000
1.2 +++ b/src/sh4/sh4x86.in Fri Sep 17 20:04:02 2010 +1000
1.3 @@ -97,9 +97,16 @@
1.4 uint32_t sh4_mode; /* Mirror of sh4r.xlat_sh4_mode */
1.5 int tstate;
1.6
1.7 - /* mode flags */
1.8 + /* mode settings */
1.9 gboolean tlb_on; /* True if tlb translation is active */
1.10 + struct mem_region_fn **priv_address_space;
1.11 + struct mem_region_fn **user_address_space;
1.12
1.13 + /* Instrumentation */
1.14 + xlat_block_begin_callback_t begin_callback;
1.15 + xlat_block_end_callback_t end_callback;
1.16 + gboolean fastmem;
1.17 +
1.18 /* Allocated memory for the (block-wide) back-patch list */
1.19 struct backpatch_record *backpatch_list;
1.20 uint32_t backpatch_posn;
1.21 @@ -117,8 +124,8 @@
1.22 { "sh4r+128", ((char *)&sh4r)+128 },
1.23 { "sh4_cpu_period", &sh4_cpu_period },
1.24 { "sh4_address_space", NULL },
1.25 + { "sh4_user_address_space", NULL },
1.26 { "sh4_translate_breakpoint_hit", sh4_translate_breakpoint_hit },
1.27 - { "sh4_user_address_space", NULL },
1.28 { "sh4_write_fpscr", sh4_write_fpscr },
1.29 { "sh4_write_sr", sh4_write_sr },
1.30 { "sh4_read_sr", sh4_read_sr },
1.31 @@ -143,17 +150,38 @@
1.32 return (features & 1) ? TRUE : FALSE;
1.33 }
1.34
1.35 +void sh4_translate_set_address_space( struct mem_region_fn **priv, struct mem_region_fn **user )
1.36 +{
1.37 + sh4_x86.priv_address_space = priv;
1.38 + sh4_x86.user_address_space = user;
1.39 + x86_symbol_table[2].ptr = priv;
1.40 + x86_symbol_table[3].ptr = user;
1.41 +}
1.42 +
1.43 void sh4_translate_init(void)
1.44 {
1.45 sh4_x86.backpatch_list = malloc(DEFAULT_BACKPATCH_SIZE);
1.46 sh4_x86.backpatch_size = DEFAULT_BACKPATCH_SIZE / sizeof(struct backpatch_record);
1.47 + sh4_x86.begin_callback = NULL;
1.48 + sh4_x86.end_callback = NULL;
1.49 + sh4_translate_set_address_space( sh4_address_space, sh4_user_address_space );
1.50 + sh4_x86.fastmem = TRUE;
1.51 sh4_x86.sse3_enabled = is_sse3_supported();
1.52 - x86_symbol_table[2].ptr = sh4_address_space;
1.53 - x86_symbol_table[3].ptr = sh4_user_address_space;
1.54 x86_disasm_init();
1.55 x86_set_symtab( x86_symbol_table, sizeof(x86_symbol_table)/sizeof(struct x86_symbol) );
1.56 }
1.57
1.58 +void sh4_translate_set_callbacks( xlat_block_begin_callback_t begin, xlat_block_end_callback_t end )
1.59 +{
1.60 + sh4_x86.begin_callback = begin;
1.61 + sh4_x86.end_callback = end;
1.62 +}
1.63 +
1.64 +void sh4_translate_set_fastmem( gboolean flag )
1.65 +{
1.66 + sh4_x86.fastmem = flag;
1.67 +}
1.68 +
1.69 /**
1.70 * Disassemble the given translated code block, and it's source SH4 code block
1.71 * side-by-side. The current native pc will be marked if non-null.
1.72 @@ -371,7 +399,7 @@
1.73 TESTL_imms_r32( 0x00000007, x86reg ); \
1.74 JNE_exc(EXC_DATA_ADDR_WRITE);
1.75
1.76 -#define address_space() ((sh4_x86.sh4_mode&SR_MD) ? (uintptr_t)sh4_address_space : (uintptr_t)sh4_user_address_space)
1.77 +#define address_space() ((sh4_x86.sh4_mode&SR_MD) ? (uintptr_t)sh4_x86.priv_address_space : (uintptr_t)sh4_x86.user_address_space)
1.78
1.79 #define UNDEF(ir)
1.80 /* Note: For SR.MD == 1 && MMUCR.AT == 0, there are no memory exceptions, so
1.81 @@ -461,7 +489,10 @@
1.82 sh4_x86.double_prec = sh4r.fpscr & FPSCR_PR;
1.83 sh4_x86.double_size = sh4r.fpscr & FPSCR_SZ;
1.84 sh4_x86.sh4_mode = sh4r.xlat_sh4_mode;
1.85 - enter_block();
1.86 + emit_prologue();
1.87 + if( sh4_x86.begin_callback ) {
1.88 + CALL_ptr( sh4_x86.begin_callback );
1.89 + }
1.90 }
1.91
1.92
1.93 @@ -508,10 +539,30 @@
1.94 }
1.95 JNE_label(wrongmode);
1.96 LEAP_rptrdisp_rptr(REG_EAX, PROLOGUE_SIZE,REG_EAX);
1.97 - JMP_rptr(REG_EAX);
1.98 + if( sh4_x86.end_callback ) {
1.99 + /* Note this does leave the stack out of alignment, but doesn't matter
1.100 + * for what we're currently using it for.
1.101 + */
1.102 + PUSH_r32(REG_EAX);
1.103 + MOVP_immptr_rptr(sh4_x86.end_callback, REG_ECX);
1.104 + JMP_rptr(REG_ECX);
1.105 + } else {
1.106 + JMP_rptr(REG_EAX);
1.107 + }
1.108 JMP_TARGET(nocode); JMP_TARGET(wrongmode);
1.109 }
1.110
1.111 +static void exit_block()
1.112 +{
1.113 + emit_epilogue();
1.114 + if( sh4_x86.end_callback ) {
1.115 + MOVP_immptr_rptr(sh4_x86.end_callback, REG_ECX);
1.116 + JMP_rptr(REG_ECX);
1.117 + } else {
1.118 + RET();
1.119 + }
1.120 +}
1.121 +
1.122 /**
1.123 * Exit the block with sh4r.pc already written
1.124 */
1.125 @@ -1616,7 +1667,7 @@
1.126 SLOTILLEGAL();
1.127 } else {
1.128 uint32_t target = (pc & 0xFFFFFFFC) + disp + 4;
1.129 - if( IS_IN_ICACHE(target) ) {
1.130 + if( sh4_x86.fastmem && IS_IN_ICACHE(target) ) {
1.131 // If the target address is in the same page as the code, it's
1.132 // pretty safe to just ref it directly and circumvent the whole
1.133 // memory subsystem. (this is a big performance win)
1.134 @@ -1738,7 +1789,7 @@
1.135 } else {
1.136 // See comments for MOV.L @(disp, PC), Rn
1.137 uint32_t target = pc + disp + 4;
1.138 - if( IS_IN_ICACHE(target) ) {
1.139 + if( sh4_x86.fastmem && IS_IN_ICACHE(target) ) {
1.140 sh4ptr_t ptr = GET_ICACHE_PTR(target);
1.141 MOVL_moffptr_eax( ptr );
1.142 MOVSXL_r16_r32( REG_EAX, REG_EAX );
.