lxdream.org :: lxdream/src/sh4/ia32mac.h
filename src/sh4/ia32mac.h
changeset 736:a02d1475ccfd
prev 669:ab344e42bca9
next 901:32c5cf5e206f
author nkeynes
date Mon Aug 25 11:29:24 2008 +0000
permissions -rw-r--r--
last change Start unstubifying the UBC module
/**
 * $Id$
 *
 * Provides the implementation for the ia32 Mac OS X ABI variant
 * (e.g. prologue, epilogue, and calling conventions). The main difference
 * from ia32abi is that stack frames are aligned on 16-byte boundaries.
 *
 * Copyright (c) 2007 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#ifndef lxdream_ia32mac_H
#define lxdream_ia32mac_H 1

#define load_ptr( reg, ptr ) load_imm32( reg, (uint32_t)ptr );
/**
 * Note: clobbers EAX to make the indirect call - this isn't usually
 * a problem since the callee will usually clobber it anyway.
 */
#define CALL_FUNC0_SIZE 13
static inline void call_func0( void *ptr )
{
    int adj = (-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    load_imm32(R_EAX, (uint32_t)ptr);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( adj, R_ESP );
}
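
/*
 * A worked example of the alignment arithmetic above (an illustrative
 * sketch, assuming the initial stack_posn of 8 set up by
 * sh4_translate_begin_block below - return address plus saved EBP):
 *
 *     adj = (-8) & 0x0F = 8
 *
 * so ESP ends up a full 16 bytes below the caller's 16-byte-aligned
 * stack pointer at the CALL, which is what the OS X ia32 ABI expects.
 * The variants below fold their pending argument pushes into the same
 * calculation.
 */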
#define CALL_FUNC1_SIZE 14
static inline void call_func1( void *ptr, int arg1 )
{
    int adj = (-4-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    PUSH_r32(arg1);
    load_imm32(R_EAX, (uint32_t)ptr);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( adj+4, R_ESP );
    sh4_x86.stack_posn -= 4;
}
#define CALL_FUNC2_SIZE 15
static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    int adj = (-8-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    PUSH_r32(arg2);
    PUSH_r32(arg1);
    load_imm32(R_EAX, (uint32_t)ptr);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( adj+8, R_ESP );
    sh4_x86.stack_posn -= 8;
}
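
/*
 * Usage sketch (hypothetical register choice, mirroring what
 * MEM_WRITE_DOUBLE does below): to emit a 32-bit store with the
 * address in ECX and the value in EDX:
 *
 *     call_func2( sh4_write_long, R_ECX, R_EDX );
 *
 * arg2 is pushed before arg1, so arg1 becomes the first cdecl
 * argument. With stack_posn == 8 this gives adj = (-8-8) & 0x0F = 0,
 * and the two argument pushes alone bring the frame to 16 bytes at
 * the CALL.
 */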
/**
 * Write a double (64-bit) value into memory, with the first word in arg2a
 * and the second in arg2b
 * NB: 36 bytes
 */
#define MEM_WRITE_DOUBLE_SIZE 36
static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
{
    int adj = (-8-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    ADD_imm8s_r32( 4, addr );
    PUSH_r32(arg2b);
    PUSH_r32(addr);
    ADD_imm8s_r32( -4, addr );
    SUB_imm8s_r32( 8, R_ESP );
    PUSH_r32(arg2a);
    PUSH_r32(addr);
    load_imm32(R_EAX, (uint32_t)sh4_write_long);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( 16, R_ESP );
    load_imm32(R_EAX, (uint32_t)sh4_write_long);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( adj+8, R_ESP );
    sh4_x86.stack_posn -= 16;
}
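
/*
 * Stack trace of the sequence above (a sketch assuming stack_posn == 8,
 * hence adj == 0): the (addr+4, arg2b) pair is pushed first; the extra
 * SUB 8 plus the (addr, arg2a) pair then leave ESP 32 bytes below the
 * aligned base at the first CALL. The ADD 16 afterwards discards that
 * frame and exposes the already-pushed (addr+4, arg2b) pair at depth
 * 16 - aligned again - for the second CALL.
 */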
/**
 * Read a double (64-bit) value from memory, writing the first word into arg2a
 * and the second into arg2b. The addr must not be in EAX
 * NB: 36 bytes
 */
#define MEM_READ_DOUBLE_SIZE 36
static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
{
    int adj = (-4-sh4_x86.stack_posn)&0x0F;
    int adj2 = (-8-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    PUSH_r32(addr);
    load_imm32(R_EAX, (uint32_t)sh4_read_long);
    CALL_r32(R_EAX);
    POP_r32(R_ECX);
    SUB_imm8s_r32( adj2-adj, R_ESP );
    PUSH_r32(R_EAX);
    ADD_imm8s_r32( 4, R_ECX );
    PUSH_r32(R_ECX);
    load_imm32(R_EAX, (uint32_t)sh4_read_long);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( 4, R_ESP );
    MOV_r32_r32( R_EAX, arg2b );
    POP_r32(arg2a);
    ADD_imm8s_r32( adj2, R_ESP );
    sh4_x86.stack_posn -= 4;
}
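
/*
 * A sketch of the read sequence above, assuming stack_posn == 8 (hence
 * adj == 4, adj2 == 0): the POP after the first call recovers the
 * pushed address into ECX while EAX holds the first word read; the
 * SUB of adj2-adj == -4 then re-adjusts the frame so that pushing EAX
 * and ECX+4 leaves ESP 16 bytes deep - aligned - at the second CALL.
 */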
/**
 * Emit the 'start of block' assembly. Sets up the stack frame and saves
 * SI/DI as required
 */
void sh4_translate_begin_block( sh4addr_t pc )
{
    PUSH_r32(R_EBP);
    /* mov &sh4r, ebp */
    load_ptr( R_EBP, ((uint8_t *)&sh4r) + 128 );

    sh4_x86.in_delay_slot = FALSE;
    sh4_x86.priv_checked = FALSE;
    sh4_x86.fpuen_checked = FALSE;
    sh4_x86.branch_taken = FALSE;
    sh4_x86.backpatch_posn = 0;
    sh4_x86.block_start_pc = pc;
    sh4_x86.tstate = TSTATE_NONE;
    sh4_x86.tlb_on = IS_MMU_ENABLED();
    sh4_x86.stack_posn = 8;
}
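
/*
 * A note on the initial stack_posn above (a reading of the code, not
 * upstream documentation): the CALL into the translated block pushes a
 * 4-byte return address and the prologue pushes EBP, so 8 bytes already
 * sit below the caller's 16-byte-aligned stack pointer. The call_func*
 * helpers consult stack_posn to restore that alignment at each
 * outgoing call.
 */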
/**
 * Exit the block with sh4r.pc already written with the target pc
 */
void exit_block_pcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    POP_r32(R_EBP);
    RET();
}
/**
 * Exit the block with sh4r.new_pc written with the target pc
 */
void exit_block_newpcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_NEW_PC );
    store_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    POP_r32(R_EBP);
    RET();
}
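
/*
 * Cycle accounting in the exit stubs (a reading of the expression
 * above): SH4 instructions are a fixed 2 bytes, so
 * (pc - block_start_pc) >> 1 is the number of instructions in the
 * block, and multiplying by sh4_cpu_period charges one cycle per
 * instruction to sh4r.slice_cycle.
 */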
#define EXIT_BLOCK_SIZE(pc)  (24 + (IS_IN_ICACHE(pc)?5:CALL_FUNC1_SIZE))

/**
 * Exit the block to an absolute PC
 */
void exit_block( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc );                            // 5
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    AND_imm8s_r32( 0xFC, R_EAX ); // 3
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    POP_r32(R_EBP);
    RET();
}
#define EXIT_BLOCK_REL_SIZE(pc)  (27 + (IS_IN_ICACHE(pc)?5:CALL_FUNC1_SIZE))

/**
 * Exit the block to a relative PC
 */
void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc - sh4_x86.block_start_pc );   // 5
    ADD_sh4r_r32( R_PC, R_ECX );
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    AND_imm8s_r32( 0xFC, R_EAX ); // 3
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    POP_r32(R_EBP);
    RET();
}
/**
 * Write the block trailer (exception handling block)
 */
void sh4_translate_end_block( sh4addr_t pc ) {
    if( sh4_x86.branch_taken == FALSE ) {
        // Didn't exit unconditionally already, so write the termination here
        exit_block_rel( pc, pc );
    }
    if( sh4_x86.backpatch_posn != 0 ) {
        unsigned int i;
        // Raise exception
        uint8_t *end_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_PC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );

        POP_r32(R_EDX);
        call_func1( sh4_raise_exception, R_EDX );
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        POP_r32(R_EBP);
        RET();

        // Exception already raised - just cleanup
        uint8_t *preexc_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_SPC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        POP_r32(R_EBP);
        RET();
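
        /*
         * Each backpatch entry below gets a small stub: the branch
         * recorded at fixup_offset is rewritten to land on the stub,
         * which loads the instruction count into EDX (and, for real
         * exceptions, pushes the exception code) and then jumps back
         * to one of the two epilogues emitted above - end_ptr to raise
         * the exception, preexc_ptr for the already-raised case.
         */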
        for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
            uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
            *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
            if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int stack_adj = -1 - sh4_x86.backpatch_list[i].exc_code;
                if( stack_adj > 0 ) {
                    ADD_imm8s_r32( stack_adj, R_ESP );
                }
                int rel = preexc_ptr - xlat_output;
                JMP_rel(rel);
            } else {
                PUSH_imm32( sh4_x86.backpatch_list[i].exc_code );
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = end_ptr - xlat_output;
                JMP_rel(rel);
            }
        }
    }
}
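
/**
 * Walk up the EBP frame chain (at most 8 frames) looking for the frame
 * whose saved EBP equals &sh4r + 128 - i.e. the frame established by
 * sh4_translate_begin_block - and return that frame's return address,
 * which is the native PC within the translated block. Returns NULL if
 * no such frame is found.
 */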
void *xlat_get_native_pc()
{
    void *result = NULL;
    asm(
        "mov %%ebp, %%eax\n\t"
        "mov $0x8, %%ecx\n\t"
        "mov %1, %%edx\n"
        "frame_loop: test %%eax, %%eax\n\t"
        "je frame_not_found\n\t"
        "cmp (%%eax), %%edx\n\t"
        "je frame_found\n\t"
        "sub $0x1, %%ecx\n\t"
        "je frame_not_found\n\t"
        "movl (%%eax), %%eax\n\t"
        "jmp frame_loop\n"
        "frame_found: movl 0x4(%%eax), %0\n"
        "frame_not_found:"
        : "=r" (result)
        : "r" (((uint8_t *)&sh4r) + 128 )
        : "eax", "ecx", "edx" );
    return result;
}
#endif /* !lxdream_ia32mac_H */