lxdream.org :: lxdream/src/sh4/ia32abi.h
filename     src/sh4/ia32abi.h
changeset    577:a181aeacd6e8
prev         571:9bc09948d0f2
author       nkeynes
date         Mon Jan 14 10:23:49 2008 +0000
branch       lxdream-mmu
permissions  -rw-r--r--
last change  Remove asm file and convert to inline (easier to cope with platform conventions)
             Add breakpoint support
             Add MMU store-queue support
/**
 * $Id$
 *
 * Provides the implementation for the ia32 ABI (e.g. prologue, epilogue, and
 * calling conventions)
 *
 * Copyright (c) 2007 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef __lxdream_ia32abi_H
#define __lxdream_ia32abi_H 1

#define load_ptr( reg, ptr ) load_imm32( reg, (uint32_t)ptr );

/**
 * Note: clobbers EAX to make the indirect call - this isn't usually
 * a problem since the callee will usually clobber it anyway.
 */
#define CALL_FUNC0_SIZE 7
static inline void call_func0( void *ptr )
{
    load_imm32(R_EAX, (uint32_t)ptr);
    CALL_r32(R_EAX);
}

#define CALL_FUNC1_SIZE 11
static inline void call_func1( void *ptr, int arg1 )
{
    PUSH_r32(arg1);
    call_func0(ptr);
    ADD_imm8s_r32( 4, R_ESP );
}

#define CALL_FUNC2_SIZE 12
static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    PUSH_r32(arg2);
    PUSH_r32(arg1);
    call_func0(ptr);
    ADD_imm8s_r32( 8, R_ESP );
}
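/*
 * Illustrative note: the call_funcN helpers follow the plain ia32 cdecl
 * convention - arguments are pushed right-to-left, the target is reached by
 * an indirect CALL through EAX, and the caller pops its own arguments. For
 * example, emitting a call equivalent to sh4_write_long( addr, value ) with
 * the address in ECX and the value in EDX could be written as
 *
 *     call_func2( sh4_write_long, R_ECX, R_EDX );
 *
 * which expands to PUSH EDX; PUSH ECX; MOV imm32,EAX; CALL EAX; ADD $8,ESP.
 * The CALL_FUNCn_SIZE constants are the byte counts of these sequences
 * (5-byte MOV + 2-byte CALL, plus one byte per PUSH and 3 bytes for the
 * stack adjustment); any return value comes back in EAX.
 */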
/**
 * Write a double (64-bit) value into memory, with the first word in arg2a, and
 * the second in arg2b
 * NB: 30 bytes
 */
#define MEM_WRITE_DOUBLE_SIZE 30
static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
{
    ADD_imm8s_r32( 4, addr );
    PUSH_r32(arg2b);
    PUSH_r32(addr);
    ADD_imm8s_r32( -4, addr );
    PUSH_r32(arg2a);
    PUSH_r32(addr);
    call_func0(sh4_write_long);
    ADD_imm8s_r32( 8, R_ESP );
    call_func0(sh4_write_long);
    ADD_imm8s_r32( 8, R_ESP );
}
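/*
 * Reading of the sequence above: it is effectively
 * call_func2( sh4_write_long, addr, arg2a ) followed by
 * call_func2( sh4_write_long, addr+4, arg2b ), except that both argument
 * pairs are pushed before the first call is made. That ordering matters
 * because the calls clobber EAX (and potentially ECX/EDX under the ia32
 * ABI), which may be exactly where the caller is holding these values.
 */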
/**
 * Read a double (64-bit) value from memory, writing the first word into arg2a
 * and the second into arg2b. The addr must not be in EAX
 * NB: 27 bytes
 */
#define MEM_READ_DOUBLE_SIZE 27
static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
{
    PUSH_r32(addr);
    call_func0(sh4_read_long);
    POP_r32(R_ECX);
    PUSH_r32(R_EAX);
    ADD_imm8s_r32( 4, R_ECX );
    PUSH_r32(R_ECX);
    call_func0(sh4_read_long);
    ADD_imm8s_r32( 4, R_ESP );
    MOV_r32_r32( R_EAX, arg2b );
    POP_r32(arg2a);
}
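/*
 * Reading of the sequence above: the pushed addr doubles as the argument to
 * the first sh4_read_long and as the base for the second read - it is popped
 * back into ECX and bumped by 4 for the second call, while the first result
 * (returned in EAX) is parked on the stack until the second call completes
 * and is finally popped into arg2a.
 */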
/**
 * Emit the 'start of block' assembly. Sets up the stack frame and saves
 * SI/DI as required
 */
void sh4_translate_begin_block( sh4addr_t pc )
{
    PUSH_r32(R_EBP);
    /* mov &sh4r, ebp */
    load_ptr( R_EBP, &sh4r );

    sh4_x86.in_delay_slot = FALSE;
    sh4_x86.priv_checked = FALSE;
    sh4_x86.fpuen_checked = FALSE;
    sh4_x86.branch_taken = FALSE;
    sh4_x86.backpatch_posn = 0;
    sh4_x86.recovery_posn = 0;
    sh4_x86.block_start_pc = pc;
    sh4_x86.tlb_on = IS_MMU_ENABLED();
    sh4_x86.tstate = TSTATE_NONE;
#ifdef STACK_ALIGN
    sh4_x86.stack_posn = 8;
#endif
}
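/*
 * On ia32 the emitted prologue is deliberately minimal; it amounts to
 *
 *     push %ebp
 *     mov  $&sh4r, %ebp
 *
 * so the caller's frame pointer is saved on the stack and EBP points at the
 * SH4 register block for the lifetime of the translated code, making every
 * register access a fixed displacement off EBP. The matching "pop %ebp; ret"
 * is emitted by the exit_block* routines below; the remaining assignments
 * here only reset the per-block translation state.
 */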
/**
 * Exit the block with sh4r.pc already written
 * Bytes: 15
 */
void exit_block_pcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, REG_OFFSET(pc) );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    POP_r32(R_EBP);
    RET();
}
#define EXIT_BLOCK_SIZE(pc)  (24 + (IS_IN_ICACHE(pc)?5:CALL_FUNC1_SIZE))

/**
 * Exit the block to an absolute PC
 */
void exit_block( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc );                            // 5
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    AND_imm8s_r32( 0xFC, R_EAX ); // 3
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    POP_r32(R_EBP);
    RET();
}
#define EXIT_BLOCK_REL_SIZE(pc)  (27 + (IS_IN_ICACHE(pc)?5:CALL_FUNC1_SIZE))

/**
 * Exit the block to a relative PC
 */
void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc - sh4_x86.block_start_pc );   // 5
    ADD_sh4r_r32( R_PC, R_ECX );
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    AND_imm8s_r32( 0xFC, R_EAX ); // 3
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    POP_r32(R_EBP);
    RET();
}
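/*
 * All three exit paths share the same shape: ensure sh4r.pc holds the target
 * address (exit_block_pcset assumes it has already been written), charge the
 * cycles consumed by this block to sh4r.slice_cycle, look up the native code
 * for the target - via a direct load of the translation LUT entry when the
 * target is known to be in the ICACHE, otherwise through xlat_get_code /
 * xlat_get_code_by_vma - then restore EBP and return the resulting pointer
 * in EAX to the caller. The AND with 0xFC clears the low two bits of the
 * looked-up value. The EXIT_BLOCK_*SIZE macros give the number of bytes each
 * variant emits, which depends on whether the ICACHE short-cut or a
 * CALL_FUNC1-sized call sequence is used.
 */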
/**
 * Write the block trailer (exception handling block)
 */
void sh4_translate_end_block( sh4addr_t pc ) {
    if( sh4_x86.branch_taken == FALSE ) {
        // Didn't exit unconditionally already, so write the termination here
        exit_block_rel( pc, pc );
    }
    if( sh4_x86.backpatch_posn != 0 ) {
        unsigned int i;
        // Raise exception
        uint8_t *end_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_PC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );

        call_func0( sh4_raise_exception );
        ADD_imm8s_r32( 4, R_ESP );
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        POP_r32(R_EBP);
        RET();

        // Exception already raised - just cleanup
        uint8_t *preexc_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_SPC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        POP_r32(R_EBP);
        RET();

        for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
            *sh4_x86.backpatch_list[i].fixup_addr =
                xlat_output - ((uint8_t *)sh4_x86.backpatch_list[i].fixup_addr) - 4;
            if( sh4_x86.backpatch_list[i].exc_code == -1 ) {
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = preexc_ptr - xlat_output;
                JMP_rel(rel);
            } else {
                PUSH_imm32( sh4_x86.backpatch_list[i].exc_code );
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = end_ptr - xlat_output;
                JMP_rel(rel);
            }
        }
    }
}
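/*
 * Shape of the emitted trailer: two shared stubs follow the normal block
 * exit. The first (at end_ptr) advances sh4r.pc by 2*EDX - EDX is expected
 * to hold the number of instructions completed before the fault - charges
 * EDX * sh4_cpu_period cycles, calls sh4_raise_exception with the exception
 * code that was pushed by the jump site, then looks up the native code for
 * the new PC via xlat_get_code* and returns it in EAX. The second (at
 * preexc_ptr) does the same accounting against sh4r.spc for cases where the
 * callee has already raised the exception itself. The backpatch loop then
 * resolves each recorded 32-bit fixup so that the failing check lands on a
 * small per-entry thunk emitted here, which loads EDX with the recorded
 * instruction count (and pushes the exception code when one is given)
 * before jumping to the appropriate stub.
 */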
void *xlat_get_native_pc()
{
    void *result = NULL;
    asm(
        "mov %%ebp, %%eax\n\t"
        "mov $0x8, %%ecx\n\t"
        "mov %1, %%edx\n"
        "frame_loop: test %%eax, %%eax\n\t"
        "je frame_not_found\n\t"
        "cmp (%%eax), %%edx\n\t"
        "je frame_found\n\t"
        "sub $0x1, %%ecx\n\t"
        "je frame_not_found\n\t"
        "movl (%%eax), %%eax\n\t"
        "jmp frame_loop\n"
        "frame_found: movl 0x4(%%eax), %0\n"
        "frame_not_found:"
        : "=r" (result)
        : "r" (&sh4r)
        : "eax", "ecx", "edx" );
    return result;
}
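/*
 * The asm above recovers the native PC within the currently executing
 * translated block by walking the saved-EBP chain: since each block's
 * prologue pushes the caller's EBP and then points EBP at &sh4r, the first
 * frame whose saved frame pointer equals &sh4r belongs to the C function
 * that was called from translated code, and the word above it (at offset 4)
 * is the return address back into the block. The walk gives up after eight
 * frames or on a NULL frame pointer, returning NULL in that case.
 */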
#endif