lxdream.org :: lxdream/src/sh4/ia64abi.h
filename     src/sh4/ia64abi.h
changeset    939:6f2302afeb89
prev         930:07e5b11419db
next         944:a4e31314bee1
author       nkeynes
date         Sat Jan 03 03:30:26 2009 +0000 (15 years ago)
branch       lxdream-mem
permissions  -rw-r--r--
last change MMU work-in-progress
* Move SDRAM out into separate sdram.c
* Move all page-table management into mmu.c
* Convert UTLB management to use the new page-tables
* Rip out all calls to mmu_vma_to_phys_* and replace with direct access
/**
 * $Id$
 *
 * Provides the implementation for the AMD64 ABI (e.g. prologue, epilogue, and
 * calling conventions)
 *
 * Copyright (c) 2007 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef lxdream_ia64abi_H
#define lxdream_ia64abi_H 1

#include <unwind.h>

#define load_ptr( reg, ptr ) load_imm64( reg, (uint64_t)ptr );
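
/**
 * Look up the page-table entry for the address in addr_reg: the address is
 * shifted right by 12 to get its 4KB page number, which is used to index
 * the sh4_address_space table, leaving the 64-bit entry in %rcx.
 */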
static inline void decode_address( int addr_reg )
{
    MOV_r32_r32( addr_reg, R_ECX );
    SHR_imm8_r32( 12, R_ECX );
    load_ptr( R_EDI, sh4_address_space );
    REXW(); OP(0x8B); OP(0x0C); OP(0xCF);   // movq (%rdi,%rcx,8), %rcx
}

/**
 * Note: clobbers EAX to make the indirect call - this isn't usually
 * a problem since the callee will usually clobber it anyway.
 * Size: 12 bytes
 */
#define CALL_FUNC0_SIZE 12
static inline void call_func0( void *ptr )
{
    load_imm64(R_EAX, (uint64_t)ptr);
    CALL_r32(R_EAX);
}

#define CALL_FUNC1_SIZE 14
static inline void call_func1( void *ptr, int arg1 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    call_func0(ptr);
}
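
/* The _exc variants below pass an additional backpatched exception-return
 * pointer (loaded with load_exc_backpatch) in the next argument register;
 * the pc parameter is accepted for interface compatibility but is not used
 * directly here.
 */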
static inline void call_func1_exc( void *ptr, int arg1, int pc )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    load_exc_backpatch(R_ESI);
    call_func0(ptr);
}

static inline void call_func1_r32disp8( int preg, uint32_t disp8, int arg1 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    CALL_r32disp8(preg, disp8);
}

static inline void call_func1_r32disp8_exc( int preg, uint32_t disp8, int arg1, int pc )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    load_exc_backpatch(R_ESI);
    CALL_r32disp8(preg, disp8);
}

#define CALL_FUNC2_SIZE 16
static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    REXW(); MOV_r32_r32(arg2, R_ESI);
    call_func0(ptr);
}

static inline void call_func2_r32disp8( int preg, uint32_t disp8, int arg1, int arg2 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    REXW(); MOV_r32_r32(arg2, R_ESI);
    CALL_r32disp8(preg, disp8);
}

static inline void call_func2_r32disp8_exc( int preg, uint32_t disp8, int arg1, int arg2, int pc )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    REXW(); MOV_r32_r32(arg2, R_ESI);
    load_exc_backpatch(R_EDX);
    CALL_r32disp8(preg, disp8);
}
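
/* All of the call helpers above marshal their arguments into %rdi, %rsi and
 * (for the two-argument _exc variant) %rdx, per the AMD64 SysV calling
 * convention, before making the call.  An illustrative use from an emitter
 * would look roughly like the following (the helper name is hypothetical):
 *
 *     call_func2( some_write_fn, R_ECX, R_EDX );   // %rdi = addr, %rsi = value
 */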
/**
 * Emit the 'start of block' assembly. Sets up the stack frame and saves
 * SI/DI as required
 */
void enter_block( )
{
    PUSH_r32(R_EBP);
    load_ptr( R_EBP, ((uint8_t *)&sh4r) + 128 );  // bias %rbp by 128 so signed 8-bit displacements can reach sh4r offsets 0..255
    // Minimum aligned allocation is 16 bytes
    REXW(); SUB_imm8s_r32( 16, R_ESP );
}

static inline void exit_block( )
{
    REXW(); ADD_imm8s_r32( 16, R_ESP );
    POP_r32(R_EBP);
    RET();
}
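
/* Each of the exit_block_* variants below charges the block's execution time
 * before leaving: (pc - block_start_pc) >> 1 is the number of 16-bit SH4
 * instructions covered by the block, and multiplying by sh4_cpu_period gives
 * the cycle count added to sh4r.slice_cycle.
 */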
/**
 * Exit the block with sh4r.pc already written
 */
void exit_block_pcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    exit_block();
}

/**
 * Exit the block with sh4r.new_pc written with the target address
 */
void exit_block_newpcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_NEW_PC );
    store_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    exit_block();
}

#define EXIT_BLOCK_SIZE(pc) (25 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))
/**
 * Exit the block to an absolute PC
 */
void exit_block_abs( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc );                            // 5
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        REXW(); MOV_moff32_EAX( xlat_get_lut_entry(pc) );
        REXW(); AND_imm8s_r32( 0xFC, R_EAX ); // 4
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma, R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    exit_block();
}

#define EXIT_BLOCK_REL_SIZE(pc)  (28 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))

/**
 * Exit the block to a relative PC
 */
void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc - sh4_x86.block_start_pc );   // 5
    ADD_sh4r_r32( R_PC, R_ECX );
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        REXW(); MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
        REXW(); AND_imm8s_r32( 0xFC, R_EAX ); // 4
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    exit_block();
}

/**
 * Write the block trailer (exception handling block)
 */
void sh4_translate_end_block( sh4addr_t pc ) {
    if( sh4_x86.branch_taken == FALSE ) {
        // Didn't exit unconditionally already, so write the termination here
        exit_block_rel( pc, pc );
    }
    if( sh4_x86.backpatch_posn != 0 ) {
        unsigned int i;
        // Raise exception
        uint8_t *end_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_PC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );

        call_func0( sh4_raise_exception );
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        exit_block();

        // Exception already raised - just cleanup
        uint8_t *preexc_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_SPC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
        load_spreg( R_EDI, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func0(xlat_get_code_by_vma);
        } else {
            call_func0(xlat_get_code);
        }
        exit_block();
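
        /* Patch each recorded exception site to point at the fixup code emitted
         * here.  Negative exc_codes jump to the pre-exception cleanup stub
         * (exc_code == -2 writes a 64-bit absolute pointer at the fixup
         * location, otherwise a rel32); non-negative exc_codes load the
         * exception code into %edi and jump to the exception-raising stub.
         * In both cases %edx is loaded with the instruction count used for
         * the cycle adjustment above.
         */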
        for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
            uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
            if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
                if( sh4_x86.backpatch_list[i].exc_code == -2 ) {
                    *((uintptr_t *)fixup_addr) = (uintptr_t)xlat_output;
                } else {
                    *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                }
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = preexc_ptr - xlat_output;
                JMP_rel(rel);
            } else {
                *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                load_imm32( R_EDI, sh4_x86.backpatch_list[i].exc_code );
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = end_ptr - xlat_output;
                JMP_rel(rel);
            }
        }
    }
}
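
/* The remainder of the file recovers the native PC inside a translated block
 * by walking the host call stack with the GCC unwinder: xlat_check_frame
 * stops the backtrace at the first return address that falls within the
 * [block_start, block_end) range recorded in UnwindInfo.
 */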
struct UnwindInfo {
    uintptr_t block_start;
    uintptr_t block_end;
    void *pc;
};

_Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
{
    struct UnwindInfo *info = arg;
    void *pc = (void *)_Unwind_GetIP(context);
    if( ((uintptr_t)pc) >= info->block_start && ((uintptr_t)pc) < info->block_end ) {
        info->pc = pc;
        return _URC_NORMAL_STOP;
    }

    return _URC_NO_REASON;
}

void *xlat_get_native_pc( void *code, uint32_t code_size )
{
    struct _Unwind_Exception exc;
    struct UnwindInfo info;

    info.pc = NULL;
    info.block_start = (uintptr_t)code;
    info.block_end = info.block_start + code_size;
    void *result = NULL;
    _Unwind_Backtrace( xlat_check_frame, &info );
    return info.pc;
}

#endif /* !lxdream_ia64abi_H */