changeset  596:dfc0c93d882e
parent     595:02d86b836ef0
child      597:87cbdf62aa35
author     nkeynes
date       Mon Jan 21 11:59:46 2008 +0000
Fix MAC.L/MAC.W stack issues
Fix various recovery-table issues
src/sh4/ia32abi.h
src/sh4/ia32mac.h
src/sh4/ia64abi.h
src/sh4/sh4trans.c
src/sh4/sh4x86.c
src/sh4/sh4x86.in
src/sh4/xltcache.c
src/sh4/xltcache.h
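
Taken together, the hunks below make two related fixes. The backpatch exc_code field becomes signed, and any value below -1 now also encodes a host-stack correction to apply before jumping to the pre-exception handler; this is what repairs MAC.L/MAC.W, which can fault on their second MMU translation while a 32-bit value is still pushed. Independently, recovery records switch from absolute native PCs to offsets within the code block, so the table survives block extension and promotion in the translation cache. A minimal sketch of the exc_code convention, assuming only what the hunks show (the helper name is illustrative, not part of the patch):

#include <assert.h>

/* exc_code == -1 keeps its old meaning: pre-exception path, nothing
 * pushed. More-negative values fold in the bytes to pop first. */
static int stack_adjust_for( int exc_code )
{
    return -1 - exc_code;
}

int main(void)
{
    assert( stack_adjust_for(-1) == 0 );   /* plain pre-exception */
    assert( stack_adjust_for(-5) == 4 );   /* one 32-bit PUSH outstanding */
    return 0;
}
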
--- a/src/sh4/ia32abi.h	Mon Jan 21 11:54:47 2008 +0000
+++ b/src/sh4/ia32abi.h	Mon Jan 21 11:59:46 2008 +0000
@@ -105,7 +105,6 @@
     sh4_x86.fpuen_checked = FALSE;
     sh4_x86.branch_taken = FALSE;
     sh4_x86.backpatch_posn = 0;
-    sh4_x86.recovery_posn = 0;
     sh4_x86.block_start_pc = pc;
     sh4_x86.tlb_on = IS_MMU_ENABLED();
     sh4_x86.tstate = TSTATE_NONE;
@@ -247,8 +246,12 @@
     for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
         *sh4_x86.backpatch_list[i].fixup_addr =
             xlat_output - ((uint8_t *)sh4_x86.backpatch_list[i].fixup_addr) - 4;
-        if( sh4_x86.backpatch_list[i].exc_code == -1 ) {
+        if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
             load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
+            int stack_adj = -1 - sh4_x86.backpatch_list[i].exc_code;
+            if( stack_adj > 0 ) {
+                ADD_imm8s_r32( stack_adj, R_ESP );
+            }
             int rel = preexc_ptr - xlat_output;
             JMP_rel(rel);
         } else {
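
ia32mac.h and ia64abi.h below receive this same four-line epilogue change. A minimal simulation of what it guarantees, assuming the encoding sketched above (all names illustrative):

#include <assert.h>

static int esp;                           /* simulated stack pointer, bytes */
static void push32(void) { esp -= 4; }    /* models PUSH_realigned_r32 */

int main(void)
{
    int base = 4096;
    esp = base;
    push32();                      /* value still pushed when the fault hits */
    int exc_code = -5;             /* as requested at the translation site */
    int stack_adj = -1 - exc_code;
    if( stack_adj > 0 )
        esp += stack_adj;          /* ADD_imm8s_r32( stack_adj, R_ESP ) */
    assert( esp == base );         /* stack rebalanced before dispatch */
    return 0;
}
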
--- a/src/sh4/ia32mac.h	Mon Jan 21 11:54:47 2008 +0000
+++ b/src/sh4/ia32mac.h	Mon Jan 21 11:59:46 2008 +0000
@@ -271,8 +271,12 @@
     for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
         *sh4_x86.backpatch_list[i].fixup_addr =
             xlat_output - ((uint8_t *)sh4_x86.backpatch_list[i].fixup_addr) - 4;
-        if( sh4_x86.backpatch_list[i].exc_code == -1 ) {
+        if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
             load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
+            int stack_adj = -1 - sh4_x86.backpatch_list[i].exc_code;
+            if( stack_adj > 0 ) {
+                ADD_imm8s_r32( stack_adj, R_ESP );
+            }
             int rel = preexc_ptr - xlat_output;
             JMP_rel(rel);
         } else {
--- a/src/sh4/ia64abi.h	Mon Jan 21 11:54:47 2008 +0000
+++ b/src/sh4/ia64abi.h	Mon Jan 21 11:59:46 2008 +0000
@@ -239,8 +239,12 @@
     for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
         *sh4_x86.backpatch_list[i].fixup_addr =
             xlat_output - ((uint8_t *)sh4_x86.backpatch_list[i].fixup_addr) - 4;
-        if( sh4_x86.backpatch_list[i].exc_code == -1 ) {
+        if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
             load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
+            int stack_adj = -1 - sh4_x86.backpatch_list[i].exc_code;
+            if( stack_adj > 0 ) {
+                ADD_imm8s_r32( stack_adj, R_ESP );
+            }
             int rel = preexc_ptr - xlat_output;
             JMP_rel(rel);
         } else {
--- a/src/sh4/sh4trans.c	Mon Jan 21 11:54:47 2008 +0000
+++ b/src/sh4/sh4trans.c	Mon Jan 21 11:59:46 2008 +0000
@@ -88,9 +88,9 @@
         code = xlat_get_code_by_vma( sh4r.pc );
         if( code == NULL ) {
             code = sh4_translate_basic_block( sh4r.pc );
-//          xlat_check_integrity();
         }
     }
+    uint32_t oldpc = sh4r.pc;
     code = code();
 }

@@ -105,9 +105,18 @@
 }

 uint8_t *xlat_output;
+xlat_cache_block_t xlat_current_block;
 struct xlat_recovery_record xlat_recovery[MAX_RECOVERY_SIZE];
 uint32_t xlat_recovery_posn;

+void sh4_translate_add_recovery( uint32_t icount )
+{
+    xlat_recovery[xlat_recovery_posn].xlat_offset =
+        ((uintptr_t)xlat_output) - ((uintptr_t)xlat_current_block->code);
+    xlat_recovery[xlat_recovery_posn].sh4_icount = icount;
+    xlat_recovery_posn++;
+}
+
 /**
  * Translate a linear basic block, ie all instructions from the start address
  * (inclusive) until the next branch/jump instruction or the end of the page
@@ -120,10 +129,10 @@
     sh4addr_t pc = start;
     sh4addr_t lastpc = (pc&0xFFFFF000)+0x1000;
     int done, i;
-    xlat_cache_block_t block = xlat_start_block( start );
-    xlat_output = (uint8_t *)block->code;
+    xlat_current_block = xlat_start_block( start );
+    xlat_output = (uint8_t *)xlat_current_block->code;
     xlat_recovery_posn = 0;
-    uint8_t *eob = xlat_output + block->size;
+    uint8_t *eob = xlat_output + xlat_current_block->size;

     if( GET_ICACHE_END() < lastpc ) {
         lastpc = GET_ICACHE_END();
@@ -140,10 +149,10 @@
         }
     }
     if( eob - xlat_output < MAX_INSTRUCTION_SIZE ) {
-        uint8_t *oldstart = block->code;
-        block = xlat_extend_block( xlat_output - oldstart + MAX_INSTRUCTION_SIZE );
-        xlat_output = block->code + (xlat_output - oldstart);
-        eob = block->code + block->size;
+        uint8_t *oldstart = xlat_current_block->code;
+        xlat_current_block = xlat_extend_block( xlat_output - oldstart + MAX_INSTRUCTION_SIZE );
+        xlat_output = xlat_current_block->code + (xlat_output - oldstart);
+        eob = xlat_current_block->code + xlat_current_block->size;
     }
     done = sh4_translate_instruction( pc );
     assert( xlat_output <= eob );
@@ -155,20 +164,20 @@
     pc += (done - 2);
     int epilogue_size = sh4_translate_end_block_size();
     uint32_t recovery_size = sizeof(struct xlat_recovery_record)*xlat_recovery_posn;
-    uint32_t finalsize = xlat_output - block->code + epilogue_size + recovery_size;
+    uint32_t finalsize = xlat_output - xlat_current_block->code + epilogue_size + recovery_size;
     if( eob - xlat_output < finalsize ) {
-        uint8_t *oldstart = block->code;
-        block = xlat_extend_block( finalsize );
-        xlat_output = block->code + (xlat_output - oldstart);
+        uint8_t *oldstart = xlat_current_block->code;
+        xlat_current_block = xlat_extend_block( finalsize );
+        xlat_output = xlat_current_block->code + (xlat_output - oldstart);
     }
     sh4_translate_end_block(pc);

     /* Write the recovery records onto the end of the code block */
     memcpy( xlat_output, xlat_recovery, recovery_size);
-    block->recover_table_offset = xlat_output - (uint8_t *)block->code;
-    block->recover_table_size = xlat_recovery_posn;
+    xlat_current_block->recover_table_offset = xlat_output - (uint8_t *)xlat_current_block->code;
+    xlat_current_block->recover_table_size = xlat_recovery_posn;
     xlat_commit_block( finalsize, pc-start );
-    return block->code;
+    return xlat_current_block->code;
 }

 /**
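
The new sh4_translate_add_recovery() stores the current output position relative to the block's code pointer, which is why the local block variable is hoisted into the global xlat_current_block: offsets stay meaningful after xlat_extend_block() relocates the code, whereas the old absolute xlat_pc values went stale on relocation. A stand-alone sketch of the record construction, with simplified stand-in types:

#include <stdint.h>
#include <stdio.h>

struct recovery_record { uint32_t xlat_offset; uint32_t sh4_icount; };

static struct recovery_record make_record( uint8_t *output,
                                           uint8_t *block_code,
                                           uint32_t icount )
{
    struct recovery_record r;
    /* offset from the start of the block, not an absolute pointer */
    r.xlat_offset = (uint32_t)((uintptr_t)output - (uintptr_t)block_code);
    r.sh4_icount = icount;
    return r;
}

int main(void)
{
    uint8_t block[64];
    struct recovery_record r = make_record( block + 17, block, 3 );
    /* If the block is memcpy'd elsewhere, offset 17 still locates the
     * same native instruction relative to the new code pointer. */
    printf( "offset=%u icount=%u\n", r.xlat_offset, r.sh4_icount );
    return 0;
}
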
--- a/src/sh4/sh4x86.c	Mon Jan 21 11:54:47 2008 +0000
+++ b/src/sh4/sh4x86.c	Mon Jan 21 11:59:46 2008 +0000
@@ -37,7 +37,7 @@
 struct backpatch_record {
     uint32_t *fixup_addr;
     uint32_t fixup_icount;
-    uint32_t exc_code;
+    int32_t exc_code;
 };

 #define MAX_RECOVERY_SIZE 2048
@@ -67,8 +67,6 @@
     struct backpatch_record *backpatch_list;
     uint32_t backpatch_posn;
     uint32_t backpatch_size;
-    struct xlat_recovery_record recovery_list[MAX_RECOVERY_SIZE];
-    uint32_t recovery_posn;
 };

 #define TSTATE_NONE -1
@@ -123,13 +121,6 @@
     sh4_x86.backpatch_posn++;
 }

-void sh4_x86_add_recovery( uint32_t pc )
-{
-    xlat_recovery[xlat_recovery_posn].xlat_pc = (uintptr_t)xlat_output;
-    xlat_recovery[xlat_recovery_posn].sh4_icount = (pc - sh4_x86.block_start_pc)>>1;
-    xlat_recovery_posn++;
-}
-
 /**
  * Emit an instruction to load an SH4 reg into a real register
  */
@@ -336,6 +327,8 @@
  * on, otherwise do nothing. Clobbers EAX, ECX and EDX. May raise a TLB exception or address error.
  */
 #define MMU_TRANSLATE_READ( addr_reg ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); MEM_RESULT(addr_reg); }
+
+#define MMU_TRANSLATE_READ_EXC( addr_reg, exc_code ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(exc_code); MEM_RESULT(addr_reg) }
 /**
  * Perform MMU translation on the address in addr_reg for a write operation, iff the TLB is turned
  * on, otherwise do nothing. Clobbers EAX, ECX and EDX. May raise a TLB exception or address error.
@@ -361,7 +354,11 @@

 uint32_t sh4_translate_end_block_size()
 {
-    return EPILOGUE_SIZE + (sh4_x86.backpatch_posn*12);
+    if( sh4_x86.backpatch_posn <= 3 ) {
+        return EPILOGUE_SIZE + (sh4_x86.backpatch_posn*12);
+    } else {
+        return EPILOGUE_SIZE + 48 + (sh4_x86.backpatch_posn-3)*15;
+    }
 }


@@ -428,7 +425,7 @@
      */

     if( !sh4_x86.in_delay_slot ) {
-        sh4_x86_add_recovery(pc);
+        sh4_translate_add_recovery( (pc - sh4_x86.block_start_pc)>>1 );
     }
     switch( (ir&0xF000) >> 12 ) {
         case 0x0:
@@ -867,7 +864,7 @@
     PUSH_realigned_r32( R_EAX );
     load_reg( R_EAX, Rn );
     ADD_imm8s_r32( 4, R_EAX );
-    MMU_TRANSLATE_READ( R_EAX );
+    MMU_TRANSLATE_READ_EXC( R_EAX, -5 );
     ADD_imm8s_sh4r( 8, REG_OFFSET(r[Rn]) );
     // Note translate twice in case of page boundaries. Maybe worth
     // adding a page-boundary check to skip the second translation
@@ -875,10 +872,11 @@
     load_reg( R_EAX, Rm );
     check_ralign32( R_EAX );
     MMU_TRANSLATE_READ( R_EAX );
+    load_reg( R_ECX, Rn );
+    check_ralign32( R_ECX );
     PUSH_realigned_r32( R_EAX );
-    load_reg( R_EAX, Rn );
-    check_ralign32( R_EAX );
-    MMU_TRANSLATE_READ( R_EAX );
+    MMU_TRANSLATE_READ_EXC( R_ECX, -5 );
+    MOV_r32_r32( R_ECX, R_EAX );
     ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
     ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
 }
@@ -2126,7 +2124,7 @@
     PUSH_realigned_r32( R_EAX );
     load_reg( R_EAX, Rn );
     ADD_imm8s_r32( 2, R_EAX );
-    MMU_TRANSLATE_READ( R_EAX );
+    MMU_TRANSLATE_READ_EXC( R_EAX, -5 );
     ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
     // Note translate twice in case of page boundaries. Maybe worth
     // adding a page-boundary check to skip the second translation
@@ -2134,10 +2132,11 @@
     load_reg( R_EAX, Rm );
     check_ralign16( R_EAX );
     MMU_TRANSLATE_READ( R_EAX );
+    load_reg( R_ECX, Rn );
+    check_ralign16( R_ECX );
     PUSH_realigned_r32( R_EAX );
-    load_reg( R_EAX, Rn );
-    check_ralign16( R_EAX );
-    MMU_TRANSLATE_READ( R_EAX );
+    MMU_TRANSLATE_READ_EXC( R_ECX, -5 );
+    MOV_r32_r32( R_ECX, R_EAX );
     ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rn]) );
     ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
 }
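
Two details here. The MAC.L/MAC.W sequences now translate Rn's address in ECX via MMU_TRANSLATE_READ_EXC( R_ECX, -5 ): the translation runs while EAX's already-translated value sits on the stack, and -5 tells the epilogue to pop those 4 bytes on a fault. Correspondingly, sh4_translate_end_block_size() budgets a flat 48 bytes plus 15 per entry beyond the third instead of 12 each; the 3-byte per-entry growth is consistent with one added ADD ESP, imm8 (3-byte encoding 83 C4 ib). A hedged model of the revised bound, with EPILOGUE_SIZE as a placeholder constant:

#include <assert.h>
#include <stdint.h>

#define EPILOGUE_SIZE 84   /* placeholder; the real value lives in sh4x86 */

static uint32_t end_block_size( uint32_t backpatch_posn )
{
    if( backpatch_posn <= 3 )
        return EPILOGUE_SIZE + backpatch_posn * 12;
    return EPILOGUE_SIZE + 48 + (backpatch_posn - 3) * 15;
}

int main(void)
{
    assert( end_block_size(0) == EPILOGUE_SIZE );
    assert( end_block_size(3) == EPILOGUE_SIZE + 36 );
    assert( end_block_size(4) == EPILOGUE_SIZE + 63 );  /* past the knee */
    return 0;
}
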
--- a/src/sh4/sh4x86.in	Mon Jan 21 11:54:47 2008 +0000
+++ b/src/sh4/sh4x86.in	Mon Jan 21 11:59:46 2008 +0000
@@ -37,7 +37,7 @@
 struct backpatch_record {
     uint32_t *fixup_addr;
     uint32_t fixup_icount;
-    uint32_t exc_code;
+    int32_t exc_code;
 };

 #define MAX_RECOVERY_SIZE 2048
@@ -67,8 +67,6 @@
     struct backpatch_record *backpatch_list;
     uint32_t backpatch_posn;
     uint32_t backpatch_size;
-    struct xlat_recovery_record recovery_list[MAX_RECOVERY_SIZE];
-    uint32_t recovery_posn;
 };

 #define TSTATE_NONE -1
@@ -123,13 +121,6 @@
     sh4_x86.backpatch_posn++;
 }

-void sh4_x86_add_recovery( uint32_t pc )
-{
-    xlat_recovery[xlat_recovery_posn].xlat_pc = (uintptr_t)xlat_output;
-    xlat_recovery[xlat_recovery_posn].sh4_icount = (pc - sh4_x86.block_start_pc)>>1;
-    xlat_recovery_posn++;
-}
-
 /**
  * Emit an instruction to load an SH4 reg into a real register
  */
@@ -336,6 +327,8 @@
  * on, otherwise do nothing. Clobbers EAX, ECX and EDX. May raise a TLB exception or address error.
  */
 #define MMU_TRANSLATE_READ( addr_reg ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); MEM_RESULT(addr_reg); }
+
+#define MMU_TRANSLATE_READ_EXC( addr_reg, exc_code ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(exc_code); MEM_RESULT(addr_reg) }
 /**
  * Perform MMU translation on the address in addr_reg for a write operation, iff the TLB is turned
  * on, otherwise do nothing. Clobbers EAX, ECX and EDX. May raise a TLB exception or address error.
@@ -361,7 +354,11 @@

 uint32_t sh4_translate_end_block_size()
 {
-    return EPILOGUE_SIZE + (sh4_x86.backpatch_posn*12);
+    if( sh4_x86.backpatch_posn <= 3 ) {
+        return EPILOGUE_SIZE + (sh4_x86.backpatch_posn*12);
+    } else {
+        return EPILOGUE_SIZE + 48 + (sh4_x86.backpatch_posn-3)*15;
+    }
 }


@@ -428,7 +425,7 @@
      */

     if( !sh4_x86.in_delay_slot ) {
-        sh4_x86_add_recovery(pc);
+        sh4_translate_add_recovery( (pc - sh4_x86.block_start_pc)>>1 );
     }
 %%
 /* ALU operations */
@@ -654,7 +651,7 @@
     PUSH_realigned_r32( R_EAX );
     load_reg( R_EAX, Rn );
     ADD_imm8s_r32( 4, R_EAX );
-    MMU_TRANSLATE_READ( R_EAX );
+    MMU_TRANSLATE_READ_EXC( R_EAX, -5 );
     ADD_imm8s_sh4r( 8, REG_OFFSET(r[Rn]) );
     // Note translate twice in case of page boundaries. Maybe worth
     // adding a page-boundary check to skip the second translation
@@ -662,10 +659,11 @@
     load_reg( R_EAX, Rm );
     check_ralign32( R_EAX );
     MMU_TRANSLATE_READ( R_EAX );
+    load_reg( R_ECX, Rn );
+    check_ralign32( R_ECX );
     PUSH_realigned_r32( R_EAX );
-    load_reg( R_EAX, Rn );
-    check_ralign32( R_EAX );
-    MMU_TRANSLATE_READ( R_EAX );
+    MMU_TRANSLATE_READ_EXC( R_ECX, -5 );
+    MOV_r32_r32( R_ECX, R_EAX );
     ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
     ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
 }
@@ -694,7 +692,7 @@
     PUSH_realigned_r32( R_EAX );
     load_reg( R_EAX, Rn );
     ADD_imm8s_r32( 2, R_EAX );
-    MMU_TRANSLATE_READ( R_EAX );
+    MMU_TRANSLATE_READ_EXC( R_EAX, -5 );
     ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
     // Note translate twice in case of page boundaries. Maybe worth
     // adding a page-boundary check to skip the second translation
@@ -702,10 +700,11 @@
     load_reg( R_EAX, Rm );
     check_ralign16( R_EAX );
     MMU_TRANSLATE_READ( R_EAX );
+    load_reg( R_ECX, Rn );
+    check_ralign16( R_ECX );
     PUSH_realigned_r32( R_EAX );
-    load_reg( R_EAX, Rn );
-    check_ralign16( R_EAX );
-    MMU_TRANSLATE_READ( R_EAX );
+    MMU_TRANSLATE_READ_EXC( R_ECX, -5 );
+    MOV_r32_r32( R_ECX, R_EAX );
     ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rn]) );
     ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
 }
--- a/src/sh4/xltcache.c	Mon Jan 21 11:54:47 2008 +0000
+++ b/src/sh4/xltcache.c	Mon Jan 21 11:59:46 2008 +0000
@@ -211,23 +211,24 @@
 xlat_recovery_record_t xlat_get_recovery( void *code, void *native_pc, gboolean recover_after )
 {
     if( code != NULL ) {
+        uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
         xlat_cache_block_t block = BLOCK_FOR_CODE(code);
         uint32_t count = block->recover_table_size;
         xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
         uint32_t posn;
         if( recover_after ) {
-            if( records[count-1].xlat_pc <= (uintptr_t)native_pc ) {
+            if( records[count-1].xlat_offset <= pc_offset ) {
                 return NULL;
             }
             for( posn=count-1; posn > 0; posn-- ) {
-                if( records[posn-1].xlat_pc < (uintptr_t)native_pc ) {
+                if( records[posn-1].xlat_offset < pc_offset ) {
                     return &records[posn];
                 }
             }
             return &records[0]; // shouldn't happen
         } else {
             for( posn = 1; posn < count; posn++ ) {
-                if( records[posn].xlat_pc >= (uintptr_t)native_pc ) {
+                if( records[posn].xlat_offset >= pc_offset ) {
                     return &records[posn-1];
                 }
             }
@@ -322,6 +323,8 @@
     start_block->active = 1;
     start_block->size = allocation;
     start_block->lut_entry = block->lut_entry;
+    start_block->recover_table_offset = block->recover_table_offset;
+    start_block->recover_table_size = block->recover_table_size;
     *block->lut_entry = &start_block->code;
     memcpy( start_block->code, block->code, block->size );
     xlat_old_cache_ptr = xlat_cut_block(start_block, size );
@@ -343,6 +346,9 @@
     do {
         if( curr->active == BLOCK_USED ) {
             xlat_promote_to_old_space( curr );
+        } else if( curr->active == BLOCK_ACTIVE ) {
+            // Active but not used, release block
+            *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03);
         }
         allocation += curr->size + sizeof(struct xlat_cache_block);
         curr = NEXT(curr);
@@ -362,6 +368,8 @@
     start_block->active = 1;
     start_block->size = allocation;
     start_block->lut_entry = block->lut_entry;
+    start_block->recover_table_offset = block->recover_table_offset;
+    start_block->recover_table_size = block->recover_table_size;
     *block->lut_entry = &start_block->code;
     memcpy( start_block->code, block->code, block->size );
     xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
@@ -482,7 +490,7 @@
         cache = NEXT(cache);
     }
     assert( cache == tail );
-    assert( foundptr == 1 );
+    assert( foundptr == 1 || tail == ptr );
 }

 void xlat_check_integrity( )
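
xlat_get_recovery() now reduces the caller's native PC to a block-relative offset once, up front, and compares offsets throughout; the two promotion paths copy recover_table_offset and recover_table_size when a block is moved, without which an offset-based lookup in the relocated block would read garbage. A hedged sketch of the recover-before branch; types are simplified and the fall-through return is assumed from context:

#include <stdint.h>
#include <stdio.h>

struct rec { uint32_t xlat_offset; uint32_t sh4_icount; };

static const struct rec *find_before( const uint8_t *code,
                                      const void *native_pc,
                                      const struct rec *records,
                                      uint32_t count )
{
    /* absolute native PC -> offset within this block's code */
    uintptr_t pc_offset = (uintptr_t)((const uint8_t *)native_pc - code);
    for( uint32_t posn = 1; posn < count; posn++ ) {
        if( records[posn].xlat_offset >= pc_offset )
            return &records[posn - 1];
    }
    return &records[count - 1];
}

int main(void)
{
    uint8_t code[64];
    struct rec table[] = { {0, 0}, {10, 1}, {25, 2} };
    const struct rec *r = find_before( code, code + 12, table, 3 );
    printf( "icount=%u\n", r->sh4_icount );   /* prints 1 */
    return 0;
}
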
--- a/src/sh4/xltcache.h	Mon Jan 21 11:54:47 2008 +0000
+++ b/src/sh4/xltcache.h	Mon Jan 21 11:59:46 2008 +0000
@@ -32,7 +32,7 @@
  *
  */
 typedef struct xlat_recovery_record {
-    uintptr_t xlat_pc;    // native (translated) pc
+    uint32_t xlat_offset; // native (translated) pc
     uint32_t sh4_icount;  // instruction number of the corresponding SH4 instruction
                           // (0 = first instruction, 1 = second instruction, ... )
 } *xlat_recovery_record_t;
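
A side effect of the uintptr_t-to-uint32_t change: the record has one layout on 32- and 64-bit hosts and no longer embeds a host address, so the table appended to a code block can simply be copied along with the block. A small illustration (exact padding is implementation-defined):

#include <stdint.h>
#include <stdio.h>

struct old_record { uintptr_t xlat_pc;    uint32_t sh4_icount; };
struct new_record { uint32_t xlat_offset; uint32_t sh4_icount; };

int main(void)
{
    /* typically 16 vs 8 bytes on a 64-bit host, 8 vs 8 on a 32-bit one */
    printf( "old=%zu new=%zu\n",
            sizeof(struct old_record), sizeof(struct new_record) );
    return 0;
}
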