lxdream.org :: lxdream/src/sh4/ia64abi.h
filename src/sh4/ia64abi.h
changeset 908:a00debcf2600
prev 901:32c5cf5e206f
next 926:68f3e0fe02f1
author nkeynes
date Thu Dec 11 23:26:03 2008 +0000
permissions -rw-r--r--
last change Disable the generational translation cache - I've got no evidence that it
actually helps performance, and it simplifies things to get rid of it (in
particular, translated code doesn't have to worry about being moved now).
/**
 * $Id$
 *
 * Provides the implementation for the AMD64 ABI (e.g. prologue, epilogue, and
 * calling conventions)
 *
 * Copyright (c) 2007 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef lxdream_ia64abi_H
#define lxdream_ia64abi_H 1

#include <unwind.h>

#define load_ptr( reg, ptr ) load_imm64( reg, (uint64_t)ptr );
/**
 * Note: clobbers EAX to make the indirect call - this is rarely a
 * problem in practice, since the callee will usually clobber it anyway.
 * Size: 12 bytes
 */
#define CALL_FUNC0_SIZE 12
static inline void call_func0( void *ptr )
{
    load_imm64(R_EAX, (uint64_t)ptr);
    CALL_r32(R_EAX);
}
#define CALL_FUNC1_SIZE 14
static inline void call_func1( void *ptr, int arg1 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);   /* arg1 -> RDI (first argument) */
    call_func0(ptr);
}

#define CALL_FUNC2_SIZE 16
static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);   /* arg1 -> RDI (first argument) */
    REXW(); MOV_r32_r32(arg2, R_ESI);   /* arg2 -> RSI (second argument) */
    call_func0(ptr);
}
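/* Under the System V AMD64 calling convention, the first six integer
 * arguments are passed in RDI, RSI, RDX, RCX, R8 and R9, with the return
 * value in RAX - hence the REXW-prefixed moves into EDI/ESI above.
 */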
#define MEM_WRITE_DOUBLE_SIZE 35
/**
 * Write a double (64-bit) value into memory, with the first word in arg2a, and
 * the second in arg2b
 */
static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
{
    PUSH_r32(arg2b);
    PUSH_r32(addr);
    call_func2(sh4_write_long, addr, arg2a);
    POP_r32(R_EDI);
    POP_r32(R_ESI);
    ADD_imm8s_r32(4, R_EDI);
    call_func0(sh4_write_long);
}
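/* The pushes preserve arg2b and addr across the first call; popping them
 * straight into RDI/RSI then sets up addr+4 and arg2b as the arguments
 * for the second sh4_write_long call.
 */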
#define MEM_READ_DOUBLE_SIZE 43
/**
 * Read a double (64-bit) value from memory, writing the first word into arg2a
 * and the second into arg2b. The addr must not be in EAX
 */
static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
{
    REXW(); SUB_imm8s_r32( 8, R_ESP );
    PUSH_r32(addr);
    call_func1(sh4_read_long, addr);
    POP_r32(R_EDI);
    PUSH_r32(R_EAX);
    ADD_imm8s_r32(4, R_EDI);
    call_func0(sh4_read_long);
    MOV_r32_r32(R_EAX, arg2b);
    POP_r32(arg2a);
    REXW(); ADD_imm8s_r32( 8, R_ESP );
}
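/* The SUB/ADD of 8 bytes around this sequence appears to be there to keep
 * RSP 16-byte aligned across the calls, as the AMD64 ABI requires at every
 * call site: combined with the single outstanding push it adjusts the
 * stack by a multiple of 16.
 */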
/**
 * Emit the 'start of block' assembly. Sets up the stack frame and saves
 * SI/DI as required
 */
void enter_block( )
{
    PUSH_r32(R_EBP);
    /* mov &sh4r, ebp */
    load_ptr( R_EBP, ((uint8_t *)&sh4r) + 128 );
}
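/* The +128 bias means the signed 8-bit displacements used against EBP
 * (-128..+127) can reach the first 256 bytes of sh4r with single-byte
 * offsets - presumably the reason for the odd-looking "+ 128" above.
 */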
/**
 * Exit the block with sh4r.pc already written
 */
void exit_block_pcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    POP_r32(R_EBP);
    RET();
}
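/* Each exit stub follows the same pattern: charge the cycles executed so
 * far to sh4r.slice_cycle ((pc - block_start_pc)>>1 instructions at
 * sh4_cpu_period cycles each), look up the native code for the target PC
 * via xlat_get_code/xlat_get_code_by_vma, then restore EBP and return,
 * leaving that code pointer in EAX for the caller.
 */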
/**
 * Exit the block with sh4r.new_pc written with the target address
 */
void exit_block_newpcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_NEW_PC );
    store_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    POP_r32(R_EBP);
    RET();
}
#define EXIT_BLOCK_SIZE(pc) (25 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))
/**
 * Exit the block to an absolute PC
 */
void exit_block( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc );                            // 5
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        REXW(); MOV_moff32_EAX( xlat_get_lut_entry(pc) );
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma, R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    REXW(); AND_imm8s_r32( 0xFC, R_EAX ); // 4
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    POP_r32(R_EBP);
    RET();
}
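/* The REXW AND with 0xFC sign-extends to a 64-bit mask clearing the low
 * two bits of the looked-up entry - presumably the translation LUT keeps
 * flag bits there that must be stripped before jumping to the address.
 */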
#define EXIT_BLOCK_REL_SIZE(pc)  (28 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))

/**
 * Exit the block to a relative PC
 */
void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc - sh4_x86.block_start_pc );   // 5
    ADD_sh4r_r32( R_PC, R_ECX );
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        REXW(); MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    REXW(); AND_imm8s_r32( 0xFC, R_EAX ); // 4
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    POP_r32(R_EBP);
    RET();
}
/**
 * Write the block trailer (exception handling block)
 */
void sh4_translate_end_block( sh4addr_t pc ) {
    if( sh4_x86.branch_taken == FALSE ) {
        // Didn't exit unconditionally already, so write the termination here
        exit_block_rel( pc, pc );
    }
    if( sh4_x86.backpatch_posn != 0 ) {
        unsigned int i;
        // Raise exception
        uint8_t *end_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_PC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );

        call_func0( sh4_raise_exception );
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        POP_r32(R_EBP);
        RET();

        // Exception already raised - just cleanup
        uint8_t *preexc_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_SPC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
        load_spreg( R_EDI, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func0(xlat_get_code_by_vma);
        } else {
            call_func0(xlat_get_code);
        }
        POP_r32(R_EBP);
        RET();

        for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
            uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
            *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
            if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int stack_adj = -1 - sh4_x86.backpatch_list[i].exc_code;
                if( stack_adj > 0 ) {
                    ADD_imm8s_r32( stack_adj*4, R_ESP );
                }
                int rel = preexc_ptr - xlat_output;
                JMP_rel(rel);
            } else {
                load_imm32( R_EDI, sh4_x86.backpatch_list[i].exc_code );
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = end_ptr - xlat_output;
                JMP_rel(rel);
            }
        }
    }
}
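/* The trailer thus consists of two shared stubs (end_ptr raises the
 * exception itself, preexc_ptr assumes it was already raised) followed by
 * one small fixup fragment per backpatch entry. Each fragment loads the
 * instruction count (and exception code where needed) into the argument
 * registers and jumps back to the appropriate stub; the loop also rewrites
 * the original 32-bit displacement so the in-block branch lands on its
 * fragment.
 */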
_Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
{
    void *rbp = (void *)_Unwind_GetGR(context, 6);  /* DWARF register 6 = RBP on x86-64 */
    void *expect = (((uint8_t *)&sh4r) + 128 );
    if( rbp == expect ) {
        void **result = (void **)arg;
        *result = (void *)_Unwind_GetIP(context);
        return _URC_NORMAL_STOP;
    }

    return _URC_NO_REASON;
}
void *xlat_get_native_pc( void *code, uint32_t size )
{
    struct _Unwind_Exception exc;

    void *result = NULL;
    _Unwind_Backtrace( xlat_check_frame, &result );
    return result;
}
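/* This walks the host call stack with _Unwind_Backtrace and identifies
 * the translated-code frame by its distinctive frame pointer value
 * (&sh4r + 128, as set up in enter_block), returning the native IP
 * within that frame. The code and size arguments are unused here.
 */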
#endif /* !lxdream_ia64abi_H */