lxdream.org :: lxdream/src/sh4/ia32abi.h
filename src/sh4/ia32abi.h
changeset 590:4db6a084ca3c
prev 586:2a3ba82cf243
next 596:dfc0c93d882e
author nkeynes
date Thu Jan 17 10:11:37 2008 +0000
permissions -rw-r--r--
last change Add flag to skip breakpoints when it's the very first instruction of a run
(i.e. so that executing dreamcast_run() when the current pc is a breakpoint doesn't
just return immediately)

/**
 * $Id$
 *
 * Provides the implementation for the ia32 ABI (e.g. prologue, epilogue, and
 * calling conventions)
 *
 * Copyright (c) 2007 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef __lxdream_ia32abi_H
#define __lxdream_ia32abi_H 1

#define load_ptr( reg, ptr ) load_imm32( reg, (uint32_t)ptr );

/**
 * Note: clobbers EAX to make the indirect call - this isn't usually
 * a problem since the callee will usually clobber it anyway.
 */
#define CALL_FUNC0_SIZE 7
static inline void call_func0( void *ptr )
{
    load_imm32(R_EAX, (uint32_t)ptr);
    CALL_r32(R_EAX);
}
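
/* A sketch of what call_func0 emits, assuming the standard ia32 encodings:
 *
 *     B8 xx xx xx xx        mov  eax, ptr        ; load_imm32, 5 bytes
 *     FF D0                 call eax             ; CALL_r32,   2 bytes
 *
 * which is where CALL_FUNC0_SIZE == 7 comes from.
 */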

#define CALL_FUNC1_SIZE 11
static inline void call_func1( void *ptr, int arg1 )
{
    PUSH_r32(arg1);
    call_func0(ptr);
    ADD_imm8s_r32( 4, R_ESP );
}

#define CALL_FUNC2_SIZE 12
static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    PUSH_r32(arg2);
    PUSH_r32(arg1);
    call_func0(ptr);
    ADD_imm8s_r32( 8, R_ESP );
}
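
/* Both wrappers follow the cdecl convention: arguments are pushed
 * right-to-left and the caller cleans up the stack afterwards. The size
 * accounting, assuming standard encodings: push r32 is 1 byte each,
 * call_func0 is 7, and "add esp, imm8" is 3; hence 1+7+3 ==
 * CALL_FUNC1_SIZE (11) and 2+7+3 == CALL_FUNC2_SIZE (12).
 */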

/**
 * Write a double (64-bit) value into memory, with the first word in arg2a and
 * the second in arg2b.
 * NB: 30 bytes
 */
#define MEM_WRITE_DOUBLE_SIZE 30
static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
{
    ADD_imm8s_r32( 4, addr );
    PUSH_r32(arg2b);
    PUSH_r32(addr);
    ADD_imm8s_r32( -4, addr );
    PUSH_r32(arg2a);
    PUSH_r32(addr);
    call_func0(sh4_write_long);
    ADD_imm8s_r32( 8, R_ESP );
    call_func0(sh4_write_long);
    ADD_imm8s_r32( 8, R_ESP );
}
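
/* In effect MEM_WRITE_DOUBLE performs the following (a sketch, not emitted
 * literally):
 *
 *     sh4_write_long( addr, arg2a );       // first word
 *     sh4_write_long( addr+4, arg2b );     // second word
 *
 * Both argument pairs are pushed up front, while addr is still live in its
 * register (temporarily bumped by +4 and then restored), so nothing needs
 * to survive the first call in a register.
 */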

/**
 * Read a double (64-bit) value from memory, writing the first word into arg2a
 * and the second into arg2b. addr must not be in EAX.
 * NB: 27 bytes
 */
#define MEM_READ_DOUBLE_SIZE 27
static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
{
    PUSH_r32(addr);
    call_func0(sh4_read_long);
    POP_r32(R_ECX);
    PUSH_r32(R_EAX);
    ADD_imm8s_r32( 4, R_ECX );
    PUSH_r32(R_ECX);
    call_func0(sh4_read_long);
    ADD_imm8s_r32( 4, R_ESP );
    MOV_r32_r32( R_EAX, arg2b );
    POP_r32(arg2a);
}
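
/* The equivalent operation, as a sketch:
 *
 *     arg2a = sh4_read_long( addr );
 *     arg2b = sh4_read_long( addr+4 );
 *
 * The first result is parked on the stack across the second call, and addr
 * is recovered into ECX from its pushed copy. Since call_func0 loads its
 * call target into EAX (and the return value arrives there too), a value
 * held in EAX does not survive a call - presumably the reason for the
 * restriction on addr noted above.
 */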

/**
 * Emit the 'start of block' assembly. Sets up the stack frame and saves
 * EBP as required.
 */
void sh4_translate_begin_block( sh4addr_t pc )
{
    PUSH_r32(R_EBP);
    /* mov &sh4r, ebp */
    load_ptr( R_EBP, &sh4r );

    sh4_x86.in_delay_slot = FALSE;
    sh4_x86.priv_checked = FALSE;
    sh4_x86.fpuen_checked = FALSE;
    sh4_x86.branch_taken = FALSE;
    sh4_x86.backpatch_posn = 0;
    sh4_x86.recovery_posn = 0;
    sh4_x86.block_start_pc = pc;
    sh4_x86.tlb_on = IS_MMU_ENABLED();
    sh4_x86.tstate = TSTATE_NONE;
#ifdef STACK_ALIGN
    sh4_x86.stack_posn = 8;
#endif
}
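
/* The emitted prologue is just (sketch):
 *
 *     push ebp
 *     mov  ebp, &sh4r
 *
 * i.e. EBP is repurposed as a pointer to the SH4 register file for the
 * lifetime of the block; every exit path below undoes this with
 * POP_r32(R_EBP) before returning.
 */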

/**
 * Exit the block with sh4r.pc already written
 */
void exit_block_pcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    POP_r32(R_EBP);
    RET();
}
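
/* Each exit_block_* variant ends with the same tail: charge the elapsed
 * cycles to sh4r.slice_cycle, translate the next PC into a native code
 * pointer (via the TLB-aware lookup when the MMU is on), restore EBP and
 * return. The resulting pointer is left in EAX, presumably for the run
 * loop to chain into the next block.
 */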

/**
 * Exit the block with sh4r.new_pc written with the target pc
 */
void exit_block_newpcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_NEW_PC );
    store_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    POP_r32(R_EBP);
    RET();
}

#define EXIT_BLOCK_SIZE(pc)  (24 + (IS_IN_ICACHE(pc)?5:CALL_FUNC1_SIZE))

/**
 * Exit the block to an absolute PC
 */
void exit_block( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc );                            // 5
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    AND_imm8s_r32( 0xFC, R_EAX ); // 3
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    POP_r32(R_EBP);
    RET();
}
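
/* When the target pc is in the current ICACHE page, the native address can
 * be pulled straight from the translation LUT with a single 5-byte
 * "mov eax, moff32" instead of a lookup call. AND_imm8s_r32( 0xFC, R_EAX )
 * sign-extends 0xFC to 0xFFFFFFFC, clearing the two low bits of the LUT
 * entry - presumably flag bits rather than address bits.
 */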

#define EXIT_BLOCK_REL_SIZE(pc)  (27 + (IS_IN_ICACHE(pc)?5:CALL_FUNC1_SIZE))

/**
 * Exit the block to a relative PC
 */
void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc - sh4_x86.block_start_pc );   // 5
    ADD_sh4r_r32( R_PC, R_ECX );
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    AND_imm8s_r32( 0xFC, R_EAX ); // 3
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    POP_r32(R_EBP);
    RET();
}
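
/* EXIT_BLOCK_REL_SIZE is EXIT_BLOCK_SIZE plus 3: the extra bytes are the
 * ADD_sh4r_r32( R_PC, R_ECX ) above (an "add ecx, [ebp+disp8]", 3 bytes
 * assuming a disp8 form), which turns the block-relative offset into an
 * absolute PC before it is stored back.
 */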

/**
 * Write the block trailer (exception handling block)
 */
void sh4_translate_end_block( sh4addr_t pc ) {
    if( sh4_x86.branch_taken == FALSE ) {
        // Didn't exit unconditionally already, so write the termination here
        exit_block_rel( pc, pc );
    }
    if( sh4_x86.backpatch_posn != 0 ) {
        unsigned int i;
        // Raise exception
        uint8_t *end_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_PC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );

        call_func0( sh4_raise_exception );
        ADD_imm8s_r32( 4, R_ESP );
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        POP_r32(R_EBP);
        RET();

        // Exception already raised - just cleanup
        uint8_t *preexc_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_SPC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        POP_r32(R_EBP);
        RET();

        for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
            *sh4_x86.backpatch_list[i].fixup_addr =
                xlat_output - ((uint8_t *)sh4_x86.backpatch_list[i].fixup_addr) - 4;
            if( sh4_x86.backpatch_list[i].exc_code == -1 ) {
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = preexc_ptr - xlat_output;
                JMP_rel(rel);
            } else {
                PUSH_imm32( sh4_x86.backpatch_list[i].exc_code );
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = end_ptr - xlat_output;
                JMP_rel(rel);
            }
        }
    }
}
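
/* Backpatch mechanics, as a sketch: two shared stubs are emitted after the
 * block body, both expecting the faulting instruction's icount in EDX. Each
 * advances the (S)PC by 2*EDX bytes (SH4 instructions are 2 bytes wide) and
 * slice_cycle by EDX*sh4_cpu_period, then leaves through the usual
 * xlat_get_code / POP / RET tail; the end_ptr stub additionally calls
 * sh4_raise_exception with the pushed exception code. The final loop turns
 * each recorded fixup site into a rel32 displacement pointing at a small
 * per-fixup trampoline, which loads EDX (and pushes the exception code,
 * unless exc_code == -1 marks an already-raised exception) before jumping
 * to the appropriate stub.
 */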

void *xlat_get_native_pc()
{
    void *result = NULL;
    asm(
        "mov %%ebp, %%eax\n\t"
        "mov $0x8, %%ecx\n\t"
        "mov %1, %%edx\n"
        "frame_loop: test %%eax, %%eax\n\t"
        "je frame_not_found\n\t"
        "cmp (%%eax), %%edx\n\t"
        "je frame_found\n\t"
        "sub $0x1, %%ecx\n\t"
        "je frame_not_found\n\t"
        "movl (%%eax), %%eax\n\t"
        "jmp frame_loop\n"
        "frame_found: movl 0x4(%%eax), %0\n"
        "frame_not_found:"
        : "=r" (result)
        : "r" (&sh4r)
        : "eax", "ecx", "edx" );
    return result;
}
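
/* The asm above walks up to 8 saved-EBP frame links. Translated blocks keep
 * &sh4r in EBP, so a C frame whose saved-EBP slot equals &sh4r must have
 * been called directly from translated code; its return address, at
 * 4(%eax), is then a native PC inside the translation. If no such frame is
 * found within the limit, result stays NULL.
 */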

#endif