lxdream.org :: lxdream/src/sh4/ia64abi.h
filename src/sh4/ia64abi.h
changeset 957:0f6131f6cc3a
prev 953:f4a156508ad1
next 991:60c7fab9c880
author nkeynes
date Thu Jan 15 11:23:20 2009 +0000 (15 years ago)
permissions -rw-r--r--
last change Fix various compile-time warnings
/**
 * $Id$
 *
 * Provides the implementation for the AMD64 ABI (eg prologue, epilogue, and
 * calling conventions)
 *
 * Copyright (c) 2007 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef lxdream_ia64abi_H
#define lxdream_ia64abi_H 1

#include <unwind.h>

#define load_ptr( reg, ptr ) load_imm64( reg, (uint64_t)ptr );
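
/**
 * Load the address-space lookup table entry covering the address in
 * addr_reg into %rcx (ie base[addr>>12]), where base is the privileged or
 * user address-space table depending on the current SR.MD mode.
 */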
static inline void decode_address( int addr_reg )
{
    uintptr_t base = (sh4r.xlat_sh4_mode&SR_MD) ? (uintptr_t)sh4_address_space : (uintptr_t)sh4_user_address_space;
    MOV_r32_r32( addr_reg, R_ECX );
    SHR_imm8_r32( 12, R_ECX );
    load_ptr( R_EDI, base );
    REXW(); OP(0x8B); OP(0x0C); OP(0xCF);   // mov.q [%rdi + %rcx*8], %rcx
}

/**
 * Note: clobbers EAX to make the indirect call - this isn't usually
 * a problem since the callee will usually clobber it anyway.
 * Size: 12 bytes
 */
#define CALL_FUNC0_SIZE 12
static inline void call_func0( void *ptr )
{
    load_imm64(R_EAX, (uint64_t)ptr);
    CALL_r32(R_EAX);
}
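
/* The call_funcN variants below load their arguments into the AMD64
 * integer argument registers (%rdi, %rsi, %rdx) before making the call. */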
#define CALL_FUNC1_SIZE 14
static inline void call_func1( void *ptr, int arg1 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    call_func0(ptr);
}
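
/**
 * As call_func1, but also loads the exception backpatch address into %rsi
 * (the second argument register) for callees that may raise an exception.
 */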
static inline void call_func1_exc( void *ptr, int arg1, int pc )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    load_exc_backpatch(R_ESI);
    call_func0(ptr);
}
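
/**
 * Indirect-call variants: call through the function pointer stored at
 * [preg + disp8] rather than at a fixed address.
 */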
static inline void call_func1_r32disp8( int preg, uint32_t disp8, int arg1 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    CALL_r32disp8(preg, disp8);
}

static inline void call_func1_r32disp8_exc( int preg, uint32_t disp8, int arg1, int pc )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    load_exc_backpatch(R_ESI);
    CALL_r32disp8(preg, disp8);
}

#define CALL_FUNC2_SIZE 16
static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    REXW(); MOV_r32_r32(arg2, R_ESI);
    call_func0(ptr);
}

static inline void call_func2_r32disp8( int preg, uint32_t disp8, int arg1, int arg2 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    REXW(); MOV_r32_r32(arg2, R_ESI);
    CALL_r32disp8(preg, disp8);
}

static inline void call_func2_r32disp8_exc( int preg, uint32_t disp8, int arg1, int arg2, int pc )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    REXW(); MOV_r32_r32(arg2, R_ESI);
    load_exc_backpatch(R_EDX);
    CALL_r32disp8(preg, disp8);
}

/**
 * Emit the 'start of block' assembly. Sets up the stack frame and saves
 * SI/DI as required
 */
void enter_block( )
{
    PUSH_r32(R_EBP);
    load_ptr( R_EBP, ((uint8_t *)&sh4r) + 128 );
    // Minimum aligned allocation is 16 bytes
    REXW(); SUB_imm8s_r32( 16, R_ESP );
}
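
/**
 * Emit the 'end of block' assembly: release the stack frame, restore EBP
 * and return to the caller.
 */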
static inline void exit_block( )
{
    REXW(); ADD_imm8s_r32( 16, R_ESP );
    POP_r32(R_EBP);
    RET();
}

/**
 * Exit the block with sh4r.pc already written
 */
void exit_block_pcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    exit_block();
}

/**
 * Exit the block with sh4r.new_pc written with the target address
 */
void exit_block_newpcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_NEW_PC );
    store_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    exit_block();
}
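
/* Size in bytes of the code emitted by exit_block_abs(), which depends on
 * whether the target PC is in the icache (direct LUT lookup) or requires a
 * call into the translation-lookup function. */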
#define EXIT_BLOCK_SIZE(pc) (25 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))
/**
 * Exit the block to an absolute PC
 */
void exit_block_abs( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc );                            // 5
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        REXW(); MOV_moff32_EAX( xlat_get_lut_entry(pc) );
        REXW(); AND_imm8s_r32( 0xFC, R_EAX ); // 4
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma, R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    exit_block();
}

#define EXIT_BLOCK_REL_SIZE(pc)  (28 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))

/**
 * Exit the block to a relative PC
 */
void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc - sh4_x86.block_start_pc );   // 5
    ADD_sh4r_r32( R_PC, R_ECX );
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        REXW(); MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
        REXW(); AND_imm8s_r32( 0xFC, R_EAX ); // 4
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    exit_block();
}

/**
 * Exit unconditionally with a general exception
 */
void exit_block_exc( int code, sh4addr_t pc )
{
    load_imm32( R_ECX, pc - sh4_x86.block_start_pc );   // 5
    ADD_r32_sh4r( R_ECX, R_PC );
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    load_imm32( R_EAX, code );
    call_func1( sh4_raise_exception, R_EAX );

    load_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }

    exit_block();
}

/**
 * Write the block trailer (exception handling block)
 */
void sh4_translate_end_block( sh4addr_t pc ) {
    if( sh4_x86.branch_taken == FALSE ) {
        // Didn't exit unconditionally already, so write the termination here
        exit_block_rel( pc, pc );
    }
    if( sh4_x86.backpatch_posn != 0 ) {
        unsigned int i;
        // Raise exception
        uint8_t *end_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_PC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );

        call_func0( sh4_raise_exception );
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        exit_block();

        // Exception already raised - just cleanup
        uint8_t *preexc_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_SPC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
        load_spreg( R_EDI, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func0(xlat_get_code_by_vma);
        } else {
            call_func0(xlat_get_code);
        }
        exit_block();

        for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
            uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
            if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
                if( sh4_x86.backpatch_list[i].exc_code == -2 ) {
                    *((uintptr_t *)fixup_addr) = (uintptr_t)xlat_output;
                } else {
                    *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                }
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = preexc_ptr - xlat_output;
                JMP_rel(rel);
            } else {
                *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                load_imm32( R_EDI, sh4_x86.backpatch_list[i].exc_code );
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = end_ptr - xlat_output;
                JMP_rel(rel);
            }
        }
    }
}

struct UnwindInfo {
    uintptr_t block_start;
    uintptr_t block_end;
    void *pc;
};
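
/**
 * _Unwind_Backtrace callback: record the PC and stop the walk once a frame
 * is found whose PC lies within the translated block being searched.
 */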
_Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
{
    struct UnwindInfo *info = arg;
    void *pc = (void *)_Unwind_GetIP(context);
    if( ((uintptr_t)pc) >= info->block_start && ((uintptr_t)pc) < info->block_end ) {
        info->pc = pc;
        return _URC_NORMAL_STOP;
    }

    return _URC_NO_REASON;
}
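
/**
 * Walk the stack with the libgcc unwinder to find the current native PC
 * within the given translated code block, or return NULL if no frame lies
 * inside it.
 */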
void *xlat_get_native_pc( void *code, uint32_t code_size )
{
    struct UnwindInfo info;

    info.pc = NULL;
    info.block_start = (uintptr_t)code;
    info.block_end = info.block_start + code_size;
    _Unwind_Backtrace( xlat_check_frame, &info );
    return info.pc;
}

#endif /* !lxdream_ia64abi_H */