lxdream.org :: lxdream/src/sh4/ia32abi.h
filename src/sh4/ia32abi.h
changeset 939:6f2302afeb89
prev 930:07e5b11419db
next 944:a4e31314bee1
author nkeynes
date Sat Jan 03 03:30:26 2009 +0000
branch lxdream-mem
permissions -rw-r--r--
last change MMU work-in-progress
* Move SDRAM out into separate sdram.c
* Move all page-table management into mmu.c
* Convert UTLB management to use the new page-tables
* Rip out all calls to mmu_vma_to_phys_* and replace with direct access
/**
 * $Id$
 *
 * Provides the implementation for the ia32 ABI variant
 * (e.g. prologue, epilogue, and calling conventions). Stack frame is
 * aligned on 16-byte boundaries for the benefit of OS X (which
 * requires it).
 *
 * Copyright (c) 2007 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#ifndef lxdream_ia32mac_H
#define lxdream_ia32mac_H 1

#define load_ptr( reg, ptr ) load_imm32( reg, (uint32_t)ptr )
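/* Note: load_ptr materializes a host pointer as a 32-bit immediate, so
 * this ABI implementation assumes a 32-bit host. */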
static inline void decode_address( int addr_reg )
{
    MOV_r32_r32( addr_reg, R_ECX );
    SHR_imm8_r32( 12, R_ECX );
    MOV_r32disp32x4_r32( R_ECX, (uintptr_t)sh4_address_space, R_ECX );
}
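/* decode_address resolves a target address to its memory-region entry:
 * shifting right by 12 yields the 4KB page number, which is then used as
 * a scaled (x4) index into the sh4_address_space table, leaving the
 * per-page entry pointer in ECX. (Description inferred from the emitted
 * instruction sequence.) */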
/**
 * Note: clobbers EAX to make the indirect call - this isn't usually
 * a problem since the callee will usually clobber it anyway.
 */
static inline void call_func0( void *ptr )
{
    CALL_ptr(ptr);
}
#ifdef HAVE_FASTCALL
static inline void call_func1( void *ptr, int arg1 )
{
    if( arg1 != R_EAX ) {
        MOV_r32_r32( arg1, R_EAX );
    }
    CALL_ptr(ptr);
}

static inline void call_func1_r32( int addr_reg, int arg1 )
{
    if( arg1 != R_EAX ) {
        MOV_r32_r32( arg1, R_EAX );
    }
    CALL_r32(addr_reg);
}

static inline void call_func1_r32disp8( int preg, uint32_t disp8, int arg1 )
{
    if( arg1 != R_EAX ) {
        MOV_r32_r32( arg1, R_EAX );
    }
    CALL_r32disp8(preg, disp8);
}

static inline void call_func1_r32disp8_exc( int preg, uint32_t disp8, int arg1, int pc )
{
    if( arg1 != R_EAX ) {
        MOV_r32_r32( arg1, R_EAX );
    }
    load_exc_backpatch(R_EDX);
    CALL_r32disp8(preg, disp8);
}
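/* In the FASTCALL convention used here, the first argument travels in
 * EAX and the second in EDX. The _exc variants additionally load an
 * exception-recovery address (emitted as a backpatched immediate via
 * load_exc_backpatch) so the callee can unwind on a memory exception;
 * the pc parameter itself is unused in these variants, presumably kept
 * so both ABI paths share a signature. */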
static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    if( arg2 != R_EDX ) {
        MOV_r32_r32( arg2, R_EDX );
    }
    if( arg1 != R_EAX ) {
        MOV_r32_r32( arg1, R_EAX );
    }
    CALL_ptr(ptr);
}

static inline void call_func2_r32( int addr_reg, int arg1, int arg2 )
{
    if( arg2 != R_EDX ) {
        MOV_r32_r32( arg2, R_EDX );
    }
    if( arg1 != R_EAX ) {
        MOV_r32_r32( arg1, R_EAX );
    }
    CALL_r32(addr_reg);
}

static inline void call_func2_r32disp8( int preg, uint32_t disp8, int arg1, int arg2 )
{
    if( arg2 != R_EDX ) {
        MOV_r32_r32( arg2, R_EDX );
    }
    if( arg1 != R_EAX ) {
        MOV_r32_r32( arg1, R_EAX );
    }
    CALL_r32disp8(preg, disp8);
}

static inline void call_func2_r32disp8_exc( int preg, uint32_t disp8, int arg1, int arg2, int pc )
{
    if( arg2 != R_EDX ) {
        MOV_r32_r32( arg2, R_EDX );
    }
    if( arg1 != R_EAX ) {
        MOV_r32_r32( arg1, R_EAX );
    }
    MOV_backpatch_esp8( 0 );
    CALL_r32disp8(preg, disp8);
}
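/* The two-argument helpers load arg2 into EDX before arg1 into EAX, so
 * arg2 may safely live in EAX; they do assume arg1 is never R_EDX, as
 * the first move would clobber it. */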
static inline void call_func1_exc( void *ptr, int arg1, int pc )
{
    if( arg1 != R_EAX ) {
        MOV_r32_r32( arg1, R_EAX );
    }
    load_exc_backpatch(R_EDX);
    CALL_ptr(ptr);
}

static inline void call_func2_exc( void *ptr, int arg1, int arg2, int pc )
{
    if( arg2 != R_EDX ) {
        MOV_r32_r32( arg2, R_EDX );
    }
    if( arg1 != R_EAX ) {
        MOV_r32_r32( arg1, R_EAX );
    }
    load_exc_backpatch(R_ECX);
    CALL_ptr(ptr);
}
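/* The exception-return address goes in the first free register: EDX for
 * the one-argument form, ECX for the two-argument form (where EDX
 * already carries arg2). */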
#else
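/* Without FASTCALL, arguments are passed on the stack. Each call site
 * pads ESP so that arguments plus padding total 16 bytes, keeping the
 * stack 16-byte aligned at the call as OS X requires: sub 12 + one
 * 4-byte push for one argument, sub 8 + two pushes for two. */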
static inline void call_func1( void *ptr, int arg1 )
{
    SUB_imm8s_r32( 12, R_ESP );
    PUSH_r32(arg1);
    CALL_ptr(ptr);
    ADD_imm8s_r32( 16, R_ESP );
}

static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    SUB_imm8s_r32( 8, R_ESP );
    PUSH_r32(arg2);
    PUSH_r32(arg1);
    CALL_ptr(ptr);
    ADD_imm8s_r32( 16, R_ESP );
}

#endif
/**
 * Emit the 'start of block' assembly. Sets up the stack frame and saves
 * SI/DI as required.
 * Allocates 8 bytes for local variables, which also has the convenient
 * side-effect of aligning the stack.
 */
void enter_block( )
{
    PUSH_r32(R_EBP);
    load_ptr( R_EBP, ((uint8_t *)&sh4r) + 128 );
    SUB_imm8s_r32( 8, R_ESP );
}
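/* On entry ESP is 12 (mod 16), the CALL having pushed the return
 * address; pushing EBP and reserving 8 bytes restores 16-byte
 * alignment. EBP is pointed 128 bytes into sh4r, presumably so that
 * nearby fields of the structure can be reached with short signed 8-bit
 * displacements in either direction. */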
static inline void exit_block( )
{
    ADD_imm8s_r32( 8, R_ESP );
    POP_r32(R_EBP);
    RET();
}
/**
 * Exit the block with sh4r.pc already written
 */
void exit_block_pcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    exit_block();
}
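/* The cycle charge ((pc - block_start_pc) >> 1) * sh4_cpu_period counts
 * the SH4 instructions executed by this block (each instruction is 2
 * bytes wide), converts to cycles, and accumulates the total into
 * sh4r.slice_cycle. */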
/**
 * Exit the block with sh4r.new_pc written with the target pc
 */
void exit_block_newpcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_NEW_PC );
    store_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    exit_block();
}
/**
 * Exit the block to an absolute PC
 */
void exit_block_abs( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc );                            // 5
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
        AND_imm8s_r32( 0xFC, R_EAX ); // 3
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    exit_block();
}
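/* When the branch target lies in the current icache page, the
 * translated-code pointer is fetched directly from the translation LUT
 * at translation time; the AND with 0xFC masks off the low two bits of
 * the entry, which appear to carry flags. Otherwise the lookup falls
 * back to xlat_get_code(_by_vma) at runtime. */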
/**
 * Exit the block to a relative PC
 */
void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc - sh4_x86.block_start_pc );   // 5
    ADD_sh4r_r32( R_PC, R_ECX );
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
        AND_imm8s_r32( 0xFC, R_EAX ); // 3
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    exit_block();
}
/**
 * Write the block trailer (exception handling block)
 */
void sh4_translate_end_block( sh4addr_t pc ) {
    if( sh4_x86.branch_taken == FALSE ) {
        // Didn't exit unconditionally already, so write the termination here
        exit_block_rel( pc, pc );
    }
    if( sh4_x86.backpatch_posn != 0 ) {
        unsigned int i;
        // Raise exception
        uint8_t *end_ptr = xlat_output;
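        // On entry to this stub, EDX holds the fixup instruction count
        // and the exception code has been pushed on the stack (see the
        // backpatch loop below). PC is advanced by 2 bytes per
        // instruction and icount*sh4_cpu_period cycles are charged.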
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_PC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );

        POP_r32(R_EAX);
        call_func1( sh4_raise_exception, R_EAX );
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        exit_block();

        // Exception already raised - just cleanup
        uint8_t *preexc_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_SPC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        exit_block();
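        // Patch each recorded fixup to target a small thunk emitted
        // here: a fixup with exc_code == -2 is patched with an absolute
        // pointer, all others with a relative displacement. Negative
        // codes load the instruction count and jump to the cleanup stub
        // (exception already raised); non-negative codes push the
        // exception number and jump to the raise-exception stub above.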
        for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
            uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
            if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
                if( sh4_x86.backpatch_list[i].exc_code == -2 ) {
                    *fixup_addr = (uint32_t)xlat_output;
                } else {
                    *fixup_addr += xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                }
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = preexc_ptr - xlat_output;
                JMP_rel(rel);
            } else {
                *fixup_addr += xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                PUSH_imm32( sh4_x86.backpatch_list[i].exc_code );
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = end_ptr - xlat_output;
                JMP_rel(rel);
            }
        }
    }
}
/**
 * The unwind methods only work if we compiled with DWARF2 frame information
 * (i.e. -fexceptions), otherwise we have to use the direct frame scan.
 */
#ifdef HAVE_EXCEPTIONS
#include <unwind.h>

struct UnwindInfo {
    uintptr_t block_start;
    uintptr_t block_end;
    void *pc;
};

_Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
{
    struct UnwindInfo *info = arg;
    void *pc = (void *)_Unwind_GetIP(context);
    if( ((uintptr_t)pc) >= info->block_start && ((uintptr_t)pc) < info->block_end ) {
        info->pc = pc;
        return _URC_NORMAL_STOP;
    }
    return _URC_NO_REASON;
}
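/* _Unwind_Backtrace invokes xlat_check_frame once per stack frame:
 * returning _URC_NO_REASON continues the walk, while _URC_NORMAL_STOP
 * ends it once a frame inside the translated block has been found. */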
void *xlat_get_native_pc( void *code, uint32_t code_size )
{
    struct UnwindInfo info;

    info.pc = NULL;
    info.block_start = (uintptr_t)code;
    info.block_end = info.block_start + code_size;
    _Unwind_Backtrace( xlat_check_frame, &info );
    return info.pc;
}
#else
void *xlat_get_native_pc( void *code, uint32_t code_size )
{
    void *result = NULL;
    asm(
        "mov %%ebp, %%eax\n\t"
        "mov $0x8, %%ecx\n\t"
        "mov %1, %%edx\n"
        "frame_loop: test %%eax, %%eax\n\t"
        "je frame_not_found\n\t"
        "cmp (%%eax), %%edx\n\t"
        "je frame_found\n\t"
        "sub $0x1, %%ecx\n\t"
        "je frame_not_found\n\t"
        "movl (%%eax), %%eax\n\t"
        "jmp frame_loop\n"
        "frame_found: movl 0x4(%%eax), %0\n"
        "frame_not_found:"
        : "=r" (result)
        : "r" (((uint8_t *)&sh4r) + 128 )
        : "eax", "ecx", "edx" );
    return result;
}
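/* Fallback: walk up to 8 links of the EBP frame chain looking for the
 * sentinel value (&sh4r + 128), which enter_block installs as the
 * translated code's EBP and which any callee's prologue then saves on
 * the stack. The word above that saved slot is the return address back
 * into the translated block, i.e. the native PC. */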
#endif
#endif /* !lxdream_ia32mac_H */