lxdream.org :: lxdream/src/sh4/ia32mac.h
filename src/sh4/ia32mac.h
changeset 906:268ea359f884
prev 905:4c17ebd9ef5e
author nkeynes
date Thu Dec 11 23:26:03 2008 +0000
permissions -rw-r--r--
last change Disable the generational translation cache - I've got no evidence that it
actually helps performance, and it simplifies things to get rid of it (in
particular, translated code doesn't have to worry about being moved now).
/**
 * $Id$
 *
 * Provides the implementation for the ia32 Mac OS X ABI variant
 * (e.g. prologue, epilogue, and calling conventions). Main difference
 * from ia32abi is that stack frames are aligned on 16-byte boundaries.
 *
 * Copyright (c) 2007 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef lxdream_ia32mac_H
#define lxdream_ia32mac_H 1

#define load_ptr( reg, ptr ) load_imm32( reg, (uint32_t)ptr );

/**
 * Note: clobbers ECX to make the indirect call - this isn't usually
 * a problem since the callee will usually clobber it anyway.
 */
#define CALL_FUNC0_SIZE 13
static inline void call_func0( void *ptr )
{
    int adj = (-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    load_imm32(R_ECX, (uint32_t)ptr);
    CALL_r32(R_ECX);
    ADD_imm8s_r32( adj, R_ESP );
}

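#if 0
/* Editor's illustrative sketch, not part of the original file: the adj value
 * computed in call_func0() above (and in the call_func1/call_func2 variants
 * below) is the padding that realigns ESP to a 16-byte boundary before the
 * CALL, as the Mac OS X i386 ABI described in the file header requires. This
 * assumes sh4_x86.stack_posn tracks the bytes pushed past the last aligned
 * boundary; the helper name is hypothetical. */
static inline int ia32mac_align_padding( int stack_posn )
{
    return (-stack_posn) & 0x0F;   /* e.g. 8 -> 8, 12 -> 4, 16 -> 0 */
}
#endif
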
#ifdef HAVE_FASTCALL
static inline void call_func1( void *ptr, int arg1 )
{
    int adj = (-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    if( arg1 != R_EAX ) {
        MOV_r32_r32( arg1, R_EAX );
    }
    load_imm32(R_ECX, (uint32_t)ptr);
    CALL_r32(R_ECX);
    ADD_imm8s_r32( adj, R_ESP );
}

static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    int adj = (-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    if( arg2 != R_EDX ) {
        MOV_r32_r32( arg2, R_EDX );
    }
    if( arg1 != R_EAX ) {
        MOV_r32_r32( arg1, R_EAX );
    }
    load_imm32(R_ECX, (uint32_t)ptr);
    CALL_r32(R_ECX);
    ADD_imm8s_r32( adj, R_ESP );
}

/**
 * Write a double (64-bit) value into memory, with the first word in arg2a, and
 * the second in arg2b
 */
static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
{
    PUSH_r32(arg2b);
    PUSH_r32(addr);
    call_func2(sh4_write_long, addr, arg2a);
    POP_r32(R_EAX);
    POP_r32(R_EDX);
    ADD_imm8s_r32(4, R_EAX);
    call_func0(sh4_write_long);
}

/**
 * Read a double (64-bit) value from memory, writing the first word into arg2a
 * and the second into arg2b. The addr must not be in EAX
 */
static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
{
    PUSH_r32(addr);
    call_func1(sh4_read_long, addr);
    POP_r32(R_ECX);
    PUSH_r32(R_EAX);
    MOV_r32_r32(R_ECX, R_EAX);
    ADD_imm8s_r32(4, R_EAX);
    call_func0(sh4_read_long);
    if( arg2b != R_EAX ) {
        MOV_r32_r32(R_EAX, arg2b);
    }
    POP_r32(arg2a);
}
#else
static inline void call_func1( void *ptr, int arg1 )
{
    int adj = (-4-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    PUSH_r32(arg1);
    load_imm32(R_EAX, (uint32_t)ptr);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( adj+4, R_ESP );
    sh4_x86.stack_posn -= 4;
}

#define CALL_FUNC2_SIZE 15
static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    int adj = (-8-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    PUSH_r32(arg2);
    PUSH_r32(arg1);
    load_imm32(R_EAX, (uint32_t)ptr);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( adj+8, R_ESP );
    sh4_x86.stack_posn -= 8;
}

/**
 * Write a double (64-bit) value into memory, with the first word in arg2a, and
 * the second in arg2b
 */
static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
{
    int adj = (-8-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    ADD_imm8s_r32( 4, addr );
    PUSH_r32(arg2b);
    PUSH_r32(addr);
    ADD_imm8s_r32( -4, addr );
    SUB_imm8s_r32( 8, R_ESP );
    PUSH_r32(arg2a);
    PUSH_r32(addr);
    load_imm32(R_EAX, (uint32_t)sh4_write_long);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( 16, R_ESP );
    load_imm32(R_EAX, (uint32_t)sh4_write_long);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( adj+8, R_ESP );
    sh4_x86.stack_posn -= 16;
}

/**
 * Read a double (64-bit) value from memory, writing the first word into arg2a
 * and the second into arg2b. The addr must not be in EAX
 */
static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
{
    int adj = (-4-sh4_x86.stack_posn)&0x0F;
    int adj2 = (-8-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    PUSH_r32(addr);
    load_imm32(R_EAX, (uint32_t)sh4_read_long);
    CALL_r32(R_EAX);
    POP_r32(R_ECX);
    SUB_imm8s_r32( adj2-adj, R_ESP );
    PUSH_r32(R_EAX);
    ADD_imm8s_r32( 4, R_ECX );
    PUSH_r32(R_ECX);
    load_imm32(R_EAX, (uint32_t)sh4_read_long);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( 4, R_ESP );
    MOV_r32_r32( R_EAX, arg2b );
    POP_r32(arg2a);
    ADD_imm8s_r32( adj2, R_ESP );
    sh4_x86.stack_posn -= 4;
}

#endif

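#if 0
/* Editor's illustrative sketch, not part of the original file: at the C level,
 * both variants of MEM_WRITE_DOUBLE/MEM_READ_DOUBLE above amount to a pair of
 * 32-bit accesses at addr and addr+4. Assuming sh4_write_long()/sh4_read_long()
 * take a 32-bit address (and sh4_read_long() returns the loaded value), a
 * reference model with hypothetical names would be: */
static void mem_write_double_model( uint32_t addr, uint32_t first, uint32_t second )
{
    sh4_write_long( addr, first );        /* first word  -> [addr]   */
    sh4_write_long( addr + 4, second );   /* second word -> [addr+4] */
}

static void mem_read_double_model( uint32_t addr, uint32_t *first, uint32_t *second )
{
    *first  = sh4_read_long( addr );      /* first word  <- [addr]   */
    *second = sh4_read_long( addr + 4 );  /* second word <- [addr+4] */
}
#endif
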
/**
 * Emit the 'start of block' assembly. Sets up the stack frame and saves
 * SI/DI as required.
 */
void enter_block( )
{
    PUSH_r32(R_EBP);
    /* mov &sh4r, ebp */
    load_ptr( R_EBP, ((uint8_t *)&sh4r) + 128 );
    sh4_x86.stack_posn = 8;
}

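/* Editor's note (illustrative): the prologue emitted by enter_block() is
 * roughly equivalent to
 *
 *     push %ebp
 *     mov  $(&sh4r + 128), %ebp
 *
 * so together with the caller's return address there are 8 bytes on the stack
 * at this point, which appears to be what stack_posn = 8 records for the
 * 16-byte alignment adjustments made by the call_func* helpers above. */
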
/**
 * Exit the block with sh4r.pc already written
 */
void exit_block_pcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    POP_r32(R_EBP);
    RET();
}

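/* Editor's note (illustrative): in the cycle accounting above,
 * (pc - block_start_pc) >> 1 is the number of 16-bit SH4 instructions covered
 * by the block, so the immediate loaded into ECX is that instruction count
 * multiplied by sh4_cpu_period, accumulated into sh4r.slice_cycle. The
 * trailing "// 5" and "// 6" comments appear to record the byte size of each
 * emitted instruction. */
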
/**
 * Exit the block with sh4r.new_pc written with the target pc
 */
void exit_block_newpcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_NEW_PC );
    store_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    POP_r32(R_EBP);
    RET();
}


/**
 * Exit the block to an absolute PC
 */
void exit_block( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc );                            // 5
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    AND_imm8s_r32( 0xFC, R_EAX ); // 3
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    POP_r32(R_EBP);
    RET();
}

/**
 * Exit the block to a relative PC
 */
void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc - sh4_x86.block_start_pc );   // 5
    ADD_sh4r_r32( R_PC, R_ECX );
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    AND_imm8s_r32( 0xFC, R_EAX ); // 3
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    POP_r32(R_EBP);
    RET();
}

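/* Editor's note (illustrative): in both exit paths above, the next block's
 * native code pointer ends up in EAX - either read directly from the
 * translation LUT when the target lies in the current icache page, or returned
 * by xlat_get_code_by_vma()/xlat_get_code(). The AND with imm8s 0xFC
 * sign-extends to 0xFFFFFFFC, clearing the low two bits, which appears to
 * strip flag bits kept in the LUT entry; the masked pointer is then handed
 * back to the caller via RET. */
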
/**
 * Write the block trailer (exception handling block)
 */
void sh4_translate_end_block( sh4addr_t pc ) {
    if( sh4_x86.branch_taken == FALSE ) {
        // Didn't exit unconditionally already, so write the termination here
        exit_block_rel( pc, pc );
    }
    if( sh4_x86.backpatch_posn != 0 ) {
        unsigned int i;
        // Raise exception
        uint8_t *end_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_PC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );

        POP_r32(R_EDX);
        call_func1( sh4_raise_exception, R_EDX );
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        POP_r32(R_EBP);
        RET();

        // Exception already raised - just cleanup
        uint8_t *preexc_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_SPC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        POP_r32(R_EBP);
        RET();

        for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
            uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
            *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
            if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int stack_adj = -1 - sh4_x86.backpatch_list[i].exc_code;
                if( stack_adj > 0 ) {
                    ADD_imm8s_r32( stack_adj, R_ESP );
                }
                int rel = preexc_ptr - xlat_output;
                JMP_rel(rel);
            } else {
                PUSH_imm32( sh4_x86.backpatch_list[i].exc_code );
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = end_ptr - xlat_output;
                JMP_rel(rel);
            }
        }
    }
}

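#if 0
/* Editor's illustrative sketch, not part of the original file: the fixup loop
 * above rewrites each recorded 4-byte operand into a rel32 displacement that
 * targets the glue code about to be emitted at xlat_output. The displacement
 * is taken relative to the end of the 4-byte operand, which is what the "- 4"
 * accounts for. A standalone model with a hypothetical name: */
static void patch_rel32_model( uint8_t *operand, uint8_t *target )
{
    /* rel32 = target - address of the byte following the 4-byte operand */
    *(uint32_t *)operand = (uint32_t)(target - (operand + 4));
}
#endif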

/**
 * The unwind methods only work if we compiled with DWARF2 frame information
 * (i.e. -fexceptions), otherwise we have to use the direct frame scan.
 */
#ifdef HAVE_EXCEPTIONS
#include <unwind.h>

struct UnwindInfo {
    uintptr_t block_start;
    uintptr_t block_end;
    void *pc;
};

_Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
{
    struct UnwindInfo *info = arg;
    void *pc = (void *)_Unwind_GetIP(context);
    if( ((uintptr_t)pc) >= info->block_start && ((uintptr_t)pc) < info->block_end ) {
        info->pc = pc;
        return _URC_NORMAL_STOP;
    }

    return _URC_NO_REASON;
}

void *xlat_get_native_pc( void *code, uint32_t code_size )
{
    struct _Unwind_Exception exc;
    struct UnwindInfo info;

    info.pc = NULL;
    info.block_start = (uintptr_t)code;
    info.block_end = info.block_start + code_size;
    void *result = NULL;
    _Unwind_Backtrace( xlat_check_frame, &info );
    return info.pc;
}
#else
void *xlat_get_native_pc( void *code, uint32_t code_size )
{
    void *result = NULL;
    asm(
        "mov %%ebp, %%eax\n\t"
        "mov $0x8, %%ecx\n\t"
        "mov %1, %%edx\n"
        "frame_loop: test %%eax, %%eax\n\t"
        "je frame_not_found\n\t"
        "cmp (%%eax), %%edx\n\t"
        "je frame_found\n\t"
        "sub $0x1, %%ecx\n\t"
        "je frame_not_found\n\t"
        "movl (%%eax), %%eax\n\t"
        "jmp frame_loop\n"
        "frame_found: movl 0x4(%%eax), %0\n"
        "frame_not_found:"
        : "=r" (result)
        : "r" (((uint8_t *)&sh4r) + 128 )
        : "eax", "ecx", "edx" );
    return result;
}
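
#if 0
/* Editor's illustrative sketch, not part of the original file: a C-level model
 * of the inline-asm frame scan above. It follows up to 8 saved-EBP links
 * looking for the frame whose saved EBP equals &sh4r + 128 (the frame pointer
 * value installed by enter_block()); the word beside that saved EBP is the
 * return address back into the translated block. The helper name is
 * hypothetical and the sketch relies on GCC's __builtin_frame_address(). */
static void *frame_scan_model( void )
{
    void **frame = (void **)__builtin_frame_address(0);
    int i;
    for( i = 0; i < 8 && frame != NULL; i++ ) {
        if( frame[0] == (void *)(((uint8_t *)&sh4r) + 128) ) {
            return frame[1];            /* saved return address of this frame */
        }
        frame = (void **)frame[0];      /* follow the saved-EBP chain */
    }
    return NULL;
}
#endif
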
#endif
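
#if 0
/* Editor's illustrative usage sketch, not part of the original file: both
 * implementations of xlat_get_native_pc() answer the same question - given a
 * translated block's native code pointer and size, which host PC on the
 * current call stack falls inside that block? The wrapper name below is
 * hypothetical. */
static void *find_native_pc_example( void *block_code, uint32_t block_size )
{
    /* NULL if no stack frame lies inside [block_code, block_code + block_size);
     * otherwise a host PC that the recovery code can map back to an SH4 PC. */
    return xlat_get_native_pc( block_code, block_size );
}
#endif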

#endif /* !lxdream_ia32mac.h */