Search
lxdream.org :: lxdream/src/sh4/ia64abi.h
lxdream 0.9.1
released Jun 29
Download Now
filename src/sh4/ia64abi.h
changeset 927:17b6b9e245d8
prev 926:68f3e0fe02f1
next 930:07e5b11419db
next 953:f4a156508ad1
author nkeynes
date Mon Dec 15 10:44:56 2008 +0000 (11 years ago)
permissions -rw-r--r--
last change Add return-address-modifying exception return code to mmu TLB lookups (a little bit faster)
view annotate diff log raw
     1 /**
     2  * $Id$
     3  * 
     4  * Provides the implementation for the AMD64 ABI (eg prologue, epilogue, and
     5  * calling conventions)
     6  *
     7  * Copyright (c) 2007 Nathan Keynes.
     8  *
     9  * This program is free software; you can redistribute it and/or modify
    10  * it under the terms of the GNU General Public License as published by
    11  * the Free Software Foundation; either version 2 of the License, or
    12  * (at your option) any later version.
    13  *
    14  * This program is distributed in the hope that it will be useful,
    15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    17  * GNU General Public License for more details.
    18  */
    20 #ifndef lxdream_ia64abi_H
    21 #define lxdream_ia64abi_H 1
    23 #include <unwind.h>
    25 #define load_ptr( reg, ptr ) load_imm64( reg, (uint64_t)ptr );
/**
 * Emit a call to the zero-argument native function at ptr: load the 64-bit
 * target address into RAX, then make an indirect CALL through it.
 *
 * Note: clobbers EAX to make the indirect call - this isn't usually
 * a problem since the callee will usually clobber it anyway.
 * Size: 12 bytes
 */
#define CALL_FUNC0_SIZE 12
static inline void call_func0( void *ptr )
{
    load_imm64(R_EAX, (uint64_t)ptr);
    CALL_r32(R_EAX);
}
#define CALL_FUNC1_SIZE 14
/**
 * Emit a call to the 1-argument native function at ptr: move arg1 into RDI
 * (the first integer argument register in the AMD64 calling convention),
 * then call through call_func0. Clobbers EAX and EDI.
 */
static inline void call_func1( void *ptr, int arg1 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    call_func0(ptr);
}
    46 static inline void call_func1_exc( void *ptr, int arg1, int pc )
    47 {
    48     REXW(); MOV_r32_r32(arg1, R_EDI);
    49     load_exc_backpatch(R_ESI);
    50     call_func0(ptr);
    51 }
#define CALL_FUNC2_SIZE 16
/**
 * Emit a call to the 2-argument native function at ptr: arg1 -> RDI,
 * arg2 -> RSI (the first two AMD64 integer argument registers), then call.
 * Clobbers EAX, EDI and ESI.
 */
static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    REXW(); MOV_r32_r32(arg2, R_ESI);
    call_func0(ptr);
}
#define MEM_WRITE_DOUBLE_SIZE 35
/**
 * Write a double (64-bit) value into memory, with the first word in arg2a, and
 * the second in arg2b
 */
static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
{
    // Save the second word and the address across the first call (the two
    // pushes together presumably keep the 16-byte stack alignment required
    // by the AMD64 ABI — TODO confirm PUSH_r32 emits an 8-byte push here).
    PUSH_r32(arg2b);
    PUSH_r32(addr);
    call_func2(sh4_write_long, addr, arg2a); // write first word at addr
    POP_r32(R_EDI);                          // restore addr  -> 1st arg (RDI)
    POP_r32(R_ESI);                          // restore arg2b -> 2nd arg (RSI)
    ADD_imm8s_r32(4, R_EDI);                 // second word goes at addr+4
    call_func0(sh4_write_long);              // args already in RDI/RSI
}
#define MEM_READ_DOUBLE_SIZE 43
/**
 * Read a double (64-bit) value from memory, writing the first word into arg2a
 * and the second into arg2b. The addr must not be in EAX
 */
static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
{
    REXW(); SUB_imm8s_r32( 8, R_ESP );  // pad so the push keeps stack alignment
    PUSH_r32(addr);                     // save addr across the first call
    call_func1(sh4_read_long, addr);    // EAX = first word
    POP_r32(R_EDI);                     // restore addr -> 1st arg (RDI)
    PUSH_r32(R_EAX);                    // save first word across second call
    ADD_imm8s_r32(4, R_EDI);            // second word is at addr+4
    call_func0(sh4_read_long);          // EAX = second word
    MOV_r32_r32(R_EAX, arg2b);
    POP_r32(arg2a);                     // retrieve the saved first word
    REXW(); ADD_imm8s_r32( 8, R_ESP );  // undo the alignment padding
}
/**
 * Emit the 'start of block' assembly. Sets up the stack frame and save
 * SI/DI as required
 */
void enter_block( ) 
{
    PUSH_r32(R_EBP);
    // Bias RBP by +128 into sh4r — presumably so that signed 8-bit
    // displacements can reach more of the structure; TODO confirm against
    // the sh4r accessors.
    load_ptr( R_EBP, ((uint8_t *)&sh4r) + 128 );
    // Minimum aligned allocation is 16 bytes
    REXW(); SUB_imm8s_r32( 16, R_ESP );
}
/**
 * Emit the 'end of block' assembly: release the 16 bytes allocated by
 * enter_block, restore RBP, and return to the caller.
 */
static inline void exit_block( )
{
    REXW(); ADD_imm8s_r32( 16, R_ESP );
    POP_r32(R_EBP);
    RET();
}
/**
 * Exit the block with sh4r.pc already written
 */
void exit_block_pcset( sh4addr_t pc )
{
    // Charge the cycles executed by this block: (pc - start)>>1 instruction
    // slots, each costing sh4_cpu_period.
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_PC );
    // Look up the native code for the already-stored PC, using the
    // VMA-aware lookup when the TLB is enabled.
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    exit_block();
}
/**
 * Exit the block with sh4r.new_pc written with the target address
 */
void exit_block_newpcset( sh4addr_t pc )
{
    // Charge the cycles executed by this block to slice_cycle.
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    // Commit new_pc -> pc, then look up the native code for it.
    load_spreg( R_EAX, R_NEW_PC );
    store_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    exit_block();
}
   149 #define EXIT_BLOCK_SIZE(pc) (25 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))
   150 /**
   151  * Exit the block to an absolute PC
   152  */
   153 void exit_block_abs( sh4addr_t pc, sh4addr_t endpc )
   154 {
   155     load_imm32( R_ECX, pc );                            // 5
   156     store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
   157     if( IS_IN_ICACHE(pc) ) {
   158         REXW(); MOV_moff32_EAX( xlat_get_lut_entry(pc) );
   159         REXW(); AND_imm8s_r32( 0xFC, R_EAX ); // 4
   160     } else if( sh4_x86.tlb_on ) {
   161         call_func1(xlat_get_code_by_vma, R_ECX);
   162     } else {
   163         call_func1(xlat_get_code,R_ECX);
   164     }
   165     load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
   166     ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
   167     exit_block();
   168 }
#define EXIT_BLOCK_REL_SIZE(pc)  (28 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))
/**
 * Exit the block to a relative PC
 */
void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc - sh4_x86.block_start_pc );   // 5
    ADD_sh4r_r32( R_PC, R_ECX );   // ECX = runtime PC + block-relative offset
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        // Fast path: fetch the translation from the lookup table, keyed by
        // the physical address of the target page ...
        REXW(); MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
        // ... and clear the low 2 bits (presumably flags) to get the pointer.
        REXW(); AND_imm8s_r32( 0xFC, R_EAX ); // 4
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    exit_block();
}
/**
 * Write the block trailer (exception handling block)
 */
void sh4_translate_end_block( sh4addr_t pc ) {
    if( sh4_x86.branch_taken == FALSE ) {
        // Didn't exit unconditionally already, so write the termination here
        exit_block_rel( pc, pc );
    }
    if( sh4_x86.backpatch_posn != 0 ) {
        unsigned int i;
        // Raise exception
        // Entry convention (from the backpatch jumps below): EDX holds the
        // instruction count at the faulting point, EDI the exception code
        // argument for sh4_raise_exception.
        uint8_t *end_ptr = xlat_output;
        // PC += icount * 2 (each instruction slot is 2 bytes)
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_PC );
        // slice_cycle += icount * sh4_cpu_period
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );

        call_func0( sh4_raise_exception );
        // Re-enter translated code at the exception handler's PC.
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        exit_block();

        // Exception already raised - just cleanup
        uint8_t *preexc_ptr = xlat_output;
        // SPC += icount * 2
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_SPC );
        // slice_cycle += icount * sh4_cpu_period
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
        load_spreg( R_EDI, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func0(xlat_get_code_by_vma);
        } else {
            call_func0(xlat_get_code);
        }
        exit_block();

        // Patch every recorded exception site to jump into one of the two
        // stub sequences above, loading its icount (and exc code) first.
        for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
            uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
            if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
                // Negative exc_code: exception was already raised by the
                // callee; -2 means an absolute 64-bit address fixup,
                // otherwise a 32-bit relative displacement.
                if( sh4_x86.backpatch_list[i].exc_code == -2 ) {
                    *((uintptr_t *)fixup_addr) = (uintptr_t)xlat_output; 
                } else {
                    *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                }
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = preexc_ptr - xlat_output;
                JMP_rel(rel);
            } else {
                // Positive exc_code: raise it ourselves via the first stub.
                *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                load_imm32( R_EDI, sh4_x86.backpatch_list[i].exc_code );
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = end_ptr - xlat_output;
                JMP_rel(rel);
            }
        }
    }
}
/* Parameter block for the stack-unwind search in xlat_get_native_pc:
 * the [block_start, block_end) address range to look for, and the
 * result slot for the PC that was found. */
struct UnwindInfo {
    uintptr_t block_start;   /* first address of the native code block */
    uintptr_t block_end;     /* one past the last address of the block */
    void *pc;                /* set to the in-block PC once found */
};
   266 _Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
   267 {
   268     struct UnwindInfo *info = arg;
   269     void *pc = (void *)_Unwind_GetIP(context);
   270     if( ((uintptr_t)pc) >= info->block_start && ((uintptr_t)pc) < info->block_end ) {
   271         info->pc = pc;
   272         return _URC_NORMAL_STOP;
   273     }
   275     return _URC_NO_REASON;
   276 }
   278 void *xlat_get_native_pc( void *code, uint32_t code_size )
   279 {
   280     struct _Unwind_Exception exc;
   281     struct UnwindInfo info;
   283     info.pc = NULL;
   284     info.block_start = (uintptr_t)code;
   285     info.block_end = info.block_start + code_size;
   286     void *result = NULL;
   287     _Unwind_Backtrace( xlat_check_frame, &info );
   288     return info.pc;
   289 }
   291 #endif /* !lxdream_ia64abi_H */
.