Search
lxdream.org :: lxdream/src/xlat/xltcache.c
lxdream 0.9.1
released Jun 29
Download Now
filename src/xlat/xltcache.c
changeset 1189:1540105786c8
prev1188:1cc9bb0b3848
next1195:072131b61d2a
author nkeynes
date Thu Dec 01 08:02:13 2011 +1000 (10 years ago)
permissions -rw-r--r--
last change Fix make check
file annotate diff log raw
nkeynes@991
     1
/**
nkeynes@991
     2
 * $Id$
nkeynes@991
     3
 * 
nkeynes@991
     4
 * Translation cache management. This part is architecture independent.
nkeynes@991
     5
 *
nkeynes@991
     6
 * Copyright (c) 2005 Nathan Keynes.
nkeynes@991
     7
 *
nkeynes@991
     8
 * This program is free software; you can redistribute it and/or modify
nkeynes@991
     9
 * it under the terms of the GNU General Public License as published by
nkeynes@991
    10
 * the Free Software Foundation; either version 2 of the License, or
nkeynes@991
    11
 * (at your option) any later version.
nkeynes@991
    12
 *
nkeynes@991
    13
 * This program is distributed in the hope that it will be useful,
nkeynes@991
    14
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
nkeynes@991
    15
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
nkeynes@991
    16
 * GNU General Public License for more details.
nkeynes@991
    17
 */
nkeynes@991
    18
nkeynes@991
    19
#include <sys/types.h>
nkeynes@991
    20
#include <sys/mman.h>
nkeynes@991
    21
#include <assert.h>
nkeynes@991
    22
nkeynes@991
    23
#include "dreamcast.h"
nkeynes@991
    24
#include "sh4/sh4core.h"
nkeynes@1186
    25
#include "sh4/sh4trans.h"
nkeynes@991
    26
#include "xlat/xltcache.h"
nkeynes@991
    27
#include "x86dasm/x86dasm.h"
nkeynes@991
    28
nkeynes@991
    29
#define XLAT_LUT_PAGE_BITS 12
nkeynes@991
    30
#define XLAT_LUT_TOTAL_BITS 28
nkeynes@991
    31
#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
nkeynes@991
    32
#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)
nkeynes@991
    33
nkeynes@991
    34
#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
nkeynes@991
    35
#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
nkeynes@991
    36
#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))
nkeynes@991
    37
nkeynes@991
    38
#define XLAT_LUT_ENTRY_EMPTY (void *)0
nkeynes@991
    39
#define XLAT_LUT_ENTRY_USED  (void *)1
nkeynes@991
    40
nkeynes@1182
    41
#define XLAT_ADDR_FROM_ENTRY(pagenum,entrynum) ((((pagenum)&0xFFFF)<<13)|(((entrynum)<<1)&0x1FFE))
nkeynes@1182
    42
nkeynes@991
    43
#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
nkeynes@991
    44
#define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
nkeynes@991
    45
#define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)
nkeynes@991
    46
nkeynes@991
    47
#define MIN_BLOCK_SIZE 32
nkeynes@991
    48
#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)
nkeynes@991
    49
nkeynes@991
    50
#define BLOCK_INACTIVE 0
nkeynes@991
    51
#define BLOCK_ACTIVE 1
nkeynes@991
    52
#define BLOCK_USED 2
nkeynes@991
    53
nkeynes@991
    54
xlat_cache_block_t xlat_new_cache;
nkeynes@991
    55
xlat_cache_block_t xlat_new_cache_ptr;
nkeynes@991
    56
xlat_cache_block_t xlat_new_create_ptr;
nkeynes@991
    57
nkeynes@991
    58
#ifdef XLAT_GENERATIONAL_CACHE
nkeynes@991
    59
xlat_cache_block_t xlat_temp_cache;
nkeynes@991
    60
xlat_cache_block_t xlat_temp_cache_ptr;
nkeynes@991
    61
xlat_cache_block_t xlat_old_cache;
nkeynes@991
    62
xlat_cache_block_t xlat_old_cache_ptr;
nkeynes@991
    63
#endif
nkeynes@991
    64
nkeynes@991
    65
static void **xlat_lut[XLAT_LUT_PAGES];
nkeynes@991
    66
static gboolean xlat_initialized = FALSE;
nkeynes@991
    67
nkeynes@991
    68
/**
 * One-time allocation of the translation cache region(s) and the LUT, then
 * reset everything to the empty state. Safe to call more than once: the
 * allocation happens only on the first call, but the flush always runs.
 * NOTE(review): mmap results are not checked for MAP_FAILED — presumably
 * startup is expected to abort elsewhere on OOM; confirm.
 */
void xlat_cache_init(void) 
{
    if( !xlat_initialized ) {
        xlat_initialized = TRUE;
        /* Code cache must be executable as well as writable */
        xlat_new_cache = (xlat_cache_block_t)mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_new_cache_ptr = xlat_new_cache;
        xlat_new_create_ptr = xlat_new_cache;
#ifdef XLAT_GENERATIONAL_CACHE
        xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_temp_cache_ptr = xlat_temp_cache;
        xlat_old_cache_ptr = xlat_old_cache;
#endif
//        xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
//                MAP_PRIVATE|MAP_ANON, -1, 0);
        /* Top-level LUT is a static array; individual pages are mmap'd lazily */
        memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
    }
    xlat_flush_cache();
}
nkeynes@991
    90
nkeynes@991
    91
/**
nkeynes@991
    92
 * Reset the cache structure to its default state
nkeynes@991
    93
 */
nkeynes@991
    94
void xlat_flush_cache() 
{
    xlat_cache_block_t tmp;
    int i;
    /* Reset each cache region to a single free block spanning the whole
     * region, followed by a zero-size sentinel block (active=1, size=0)
     * that marks the end of the cache for the allocation walkers. */
    xlat_new_cache_ptr = xlat_new_cache;
    xlat_new_cache_ptr->active = 0;
    xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_new_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_temp_cache_ptr->active = 0;
    xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_temp_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_old_cache_ptr->active = 0;
    xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_old_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#endif
    /* Clear every allocated LUT page (pages themselves stay mapped) */
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        if( xlat_lut[i] != NULL ) {
            memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
        }
    }
}
nkeynes@991
   124
nkeynes@1186
   125
/**
 * Take a block out of service: mark it inactive, redirect its LUT entry to
 * the next block in the chain (may be NULL), and unlink any other blocks
 * that branch directly into this one via its use list.
 */
void xlat_delete_block( xlat_cache_block_t block )
{
    block->active = 0;
    *block->lut_entry = block->chain;
    sh4_translate_unlink_block( block->use_list );
}
nkeynes@1186
   131
nkeynes@991
   132
/**
 * Invalidate every translated block reachable from the given LUT page,
 * then clear all entries (both entry points and USED markers) to EMPTY.
 */
static void xlat_flush_page_by_lut( void **page )
{
    int i;
    for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
        if( IS_ENTRY_POINT(page[i]) ) {
            /* Walk the chain of blocks translated for this address and
             * delete each one. xlat_delete_block rewrites *lut_entry, but
             * the entry is unconditionally cleared below anyway. */
            void *p = page[i];
            do {
                xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
                xlat_delete_block(block);
                p = block->chain;
            } while( p != NULL );
        }
        page[i] = NULL;
    }
}
nkeynes@991
   147
nkeynes@991
   148
/**
 * Invalidate translations covering a single 16-bit word at addr.
 * Flushes the whole containing LUT page if the word is covered by any block.
 */
void FASTCALL xlat_invalidate_word( sh4addr_t addr )
{
    void **lut_page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( lut_page != NULL && lut_page[XLAT_LUT_ENTRY(addr)] != NULL ) {
        xlat_flush_page_by_lut(lut_page);
    }
}
nkeynes@991
   158
nkeynes@991
   159
/**
 * Invalidate translations covering a 32-bit longword at addr, i.e. two
 * consecutive 16-bit LUT entries. Flushes the whole containing LUT page if
 * either word is covered by any block.
 *
 * Fixed: the old code tested both entries with a single
 * *(uint64_t *)&page[entry] load. That is a strict-aliasing violation, and
 * on 64-bit hosts (8-byte pointers) it only inspected ONE entry, silently
 * missing invalidations of the second word. Checking both entries
 * explicitly is correct for any pointer size.
 * NOTE(review): assumes addr is longword-aligned (entry is even, so
 * entry+1 stays within the page) — consistent with SH4 longword access
 * alignment; confirm against callers.
 */
void FASTCALL xlat_invalidate_long( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( page[entry] != NULL || page[entry+1] != NULL ) {
            xlat_flush_page_by_lut(page);
        }
    }
}
nkeynes@991
   169
nkeynes@991
   170
/**
 * Invalidate all translations overlapping the byte range
 * [address, address+size), walking the range one LUT page at a time and
 * flushing any page that contains at least one live entry.
 *
 * Fixed: entry_count was decremented twice per iteration when the page was
 * allocated (once inside the if( page != NULL ) body and once
 * unconditionally after it), which made the loop terminate early and leave
 * later pages of the range un-invalidated. The single unconditional
 * decrement is the correct one — progress must be made whether or not the
 * page is allocated.
 *
 * @param address start of the modified region (SH4 address)
 * @param size    length of the region in bytes (2 bytes per LUT entry)
 */
void FASTCALL xlat_invalidate_block( sh4addr_t address, size_t size )
{
    int i;
    int entry_count = size >> 1; // words
    uint32_t page_no = XLAT_LUT_PAGE(address);
    int entry = XLAT_LUT_ENTRY(address);
    do {
        void **page = xlat_lut[page_no];
        /* Number of entries this iteration covers within the current page */
        int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
        if( entry_count < page_entries ) {
            page_entries = entry_count;
        }
        if( page != NULL ) {
            if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
                /* Overwriting the entire page anyway */
                xlat_flush_page_by_lut(page);
            } else {
                /* Flush if any entry in the affected span is live */
                for( i=entry; i<entry+page_entries; i++ ) {
                    if( page[i] != NULL ) {
                        xlat_flush_page_by_lut(page);
                        break;
                    }
                }
            }
        }
        page_no ++;
        entry_count -= page_entries;
        entry = 0; /* subsequent pages are processed from their start */
    } while( entry_count > 0 );
}
nkeynes@991
   201
nkeynes@991
   202
/**
 * Invalidate every translation in the LUT page containing address.
 * No-op if the page has never been allocated.
 */
void FASTCALL xlat_flush_page( sh4addr_t address )
{
    void **lut_page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( lut_page == NULL )
        return;
    xlat_flush_page_by_lut(lut_page);
}
nkeynes@991
   209
nkeynes@991
   210
/**
 * Look up the translated code entry point for an SH4 address.
 * Returns NULL when there is no translation (an unallocated page, an EMPTY
 * entry, or a bare USED marker — the low tag bits are masked off, which
 * maps both EMPTY and USED to NULL).
 */
void * FASTCALL xlat_get_code( sh4addr_t address )
{
    void **lut_page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( lut_page == NULL ) {
        return NULL;
    }
    uintptr_t tagged = (uintptr_t)lut_page[XLAT_LUT_ENTRY(address)];
    return (void *)(tagged & ~((uintptr_t)0x03));
}
nkeynes@991
   219
nkeynes@991
   220
/**
 * Find the last recovery record whose xlat_offset is strictly before the
 * native PC's offset into the code block — i.e. the recovery state in
 * effect just before native_pc. Returns NULL if code is NULL.
 * NOTE(review): assumes recover_table_size >= 1 when code != NULL;
 * count == 0 would index records[-1] — confirm translator always emits at
 * least one record.
 */
xlat_recovery_record_t xlat_get_pre_recovery( void *code, void *native_pc )
{
    if( code != NULL ) {
        uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
        xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code);
        uint32_t count = block->recover_table_size;
        /* Recovery table lives inside the block, after the code proper */
        xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
        uint32_t posn;
        /* Linear scan for the first record at or past pc_offset; the one
         * before it is the answer */
        for( posn = 1; posn < count; posn++ ) {
        	if( records[posn].xlat_offset >= pc_offset ) {
        		return &records[posn-1];
        	}
        }
        return &records[count-1];
    }
    return NULL;	
}
nkeynes@991
   237
nkeynes@991
   238
/**
 * Return a pointer to the LUT entry for the given SH4 address, allocating
 * (and zeroing) the containing LUT page on demand.
 * NOTE(review): the mmap result is not checked for MAP_FAILED — a failed
 * allocation would be written through; confirm OOM policy.
 */
void ** FASTCALL xlat_get_lut_entry( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];

    /* Add the LUT entry for the block */
    if( page == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] = page =
            (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( page, 0, XLAT_LUT_PAGE_SIZE );
    }

    return &page[XLAT_LUT_ENTRY(address)];
}
nkeynes@991
   252
nkeynes@991
   253
nkeynes@991
   254
nkeynes@991
   255
/**
 * Return the allocated size of the cache block whose code area starts at
 * the given pointer. The block header sits immediately before the code.
 */
uint32_t FASTCALL xlat_get_block_size( void *block )
{
    char *code_start = (char *)block;
    xlat_cache_block_t hdr = (xlat_cache_block_t)(code_start - sizeof(struct xlat_cache_block));
    return hdr->size;
}
nkeynes@991
   260
nkeynes@991
   261
/**
 * Return the size of the executable code portion of a block (excluding the
 * recovery table). When a recovery table is present, the code ends where
 * the table begins; otherwise the whole block is code.
 */
uint32_t FASTCALL xlat_get_code_size( void *block )
{
    char *code_start = (char *)block;
    xlat_cache_block_t hdr = (xlat_cache_block_t)(code_start - sizeof(struct xlat_cache_block));
    return hdr->recover_table_offset != 0 ? hdr->recover_table_offset : hdr->size;
}
nkeynes@991
   270
nkeynes@991
   271
/**
nkeynes@991
   272
 * Cut the specified block so that it has the given size, with the remaining data
nkeynes@991
   273
 * forming a new free block. If the free block would be less than the minimum size,
nkeynes@991
   274
 * the cut is not performed.
nkeynes@991
   275
 * @return the next block after the (possibly cut) block.
nkeynes@991
   276
 */
nkeynes@991
   277
static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
{
    cutsize = (cutsize + 3) & 0xFFFFFFFC; // force word alignment
    assert( cutsize <= block->size );
    /* Only split if the remainder can hold a header plus MIN_BLOCK_SIZE */
    if( block->size > cutsize + MIN_TOTAL_SIZE ) {
        int oldsize = block->size;
        block->size = cutsize;
        /* Remainder becomes a new free block; its size excludes its own header */
        xlat_cache_block_t next = NEXT(block);
        next->active = 0;
        next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
        return next;
    } else {
        return NEXT(block);
    }
}
nkeynes@991
   292
nkeynes@991
   293
#ifdef XLAT_GENERATIONAL_CACHE
nkeynes@991
   294
/**
nkeynes@991
   295
 * Promote a block in temp space (or elsewhere for that matter) to old space.
nkeynes@991
   296
 *
nkeynes@991
   297
 * @param block to promote.
nkeynes@991
   298
 */
nkeynes@991
   299
static void xlat_promote_to_old_space( xlat_cache_block_t block )
{
    /* allocation tracks usable bytes gathered so far; it starts at minus one
     * header because the first header consumed becomes the new block's own */
    int allocation = (int)-sizeof(struct xlat_cache_block);
    int size = block->size;
    xlat_cache_block_t curr = xlat_old_cache_ptr;
    xlat_cache_block_t start_block = curr;
    /* Coalesce consecutive blocks (evicting as we go) until we have room */
    do {
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_old_cache;
        }
    } while(1);
    /* Claim the coalesced region and copy the block's metadata and code */
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    /* Repoint the LUT at the relocated code before copying */
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    /* Trim any surplus back into the free pool */
    xlat_old_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_old_cache_ptr->size == 0 ) {
        xlat_old_cache_ptr = xlat_old_cache;
    }
}
nkeynes@991
   336
nkeynes@991
   337
/**
nkeynes@991
   338
 * Similarly to the above method, promotes a block to temp space.
nkeynes@991
   339
 * TODO: Try to combine these - they're nearly identical
nkeynes@991
   340
 */
nkeynes@991
   341
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    int size = block->size;
    /* Starts at minus one header: the first header consumed becomes the
     * relocated block's own header */
    int allocation = (int)-sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = xlat_temp_cache_ptr;
    xlat_cache_block_t start_block = curr;
    /* Coalesce consecutive temp blocks until there is room, promoting
     * still-used victims to old space and dropping merely-active ones */
    do {
        if( curr->active == BLOCK_USED ) {
            xlat_promote_to_old_space( curr );
        } else if( curr->active == BLOCK_ACTIVE ) {
            // Active but not used, release block
            *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03);
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_temp_cache;
        }
    } while(1);
    /* Claim the coalesced region and copy metadata + code across */
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    /* Return any surplus to the free pool */
    xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_temp_cache_ptr->size == 0 ) {
        xlat_temp_cache_ptr = xlat_temp_cache;
    }

}
nkeynes@991
   385
#else 
nkeynes@991
   386
/**
 * Non-generational build: there is no temp space, so "promotion" simply
 * evicts the block.
 *
 * Fixed: removed the redundant `*block->lut_entry = block->chain;` —
 * xlat_delete_block() performs exactly that assignment itself, so the
 * extra store was dead.
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    xlat_delete_block(block);
}
nkeynes@991
   391
#endif
nkeynes@991
   392
nkeynes@991
   393
/**
nkeynes@991
   394
 * Returns the next block in the new cache list that can be written to by the
nkeynes@991
   395
 * translator. If the next block is active, it is evicted first.
nkeynes@991
   396
 */
nkeynes@991
   397
xlat_cache_block_t xlat_start_block( sh4addr_t address )
{
    /* Wrap to the start of the cache if we've hit the end sentinel */
    if( xlat_new_cache_ptr->size == 0 ) {
        xlat_new_cache_ptr = xlat_new_cache;
    }

    /* Evict the current occupant if the next block is still live */
    if( xlat_new_cache_ptr->active ) {
        xlat_promote_to_temp_space( xlat_new_cache_ptr );
    }
    xlat_new_create_ptr = xlat_new_cache_ptr;
    xlat_new_create_ptr->active = 1;
    xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);

    /* Add the LUT entry for the block */
    if( xlat_lut[XLAT_LUT_PAGE(address)] == NULL ) {
        /* NOTE(review): mmap result unchecked — same OOM policy question as
         * xlat_get_lut_entry */
        xlat_lut[XLAT_LUT_PAGE(address)] =
            (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( xlat_lut[XLAT_LUT_PAGE(address)], 0, XLAT_LUT_PAGE_SIZE );
    }

    /* If the address already has a translation, chain the new block in
     * front of the old one rather than discarding it */
    if( IS_ENTRY_POINT(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]) ) {
        void *p = xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)];
        xlat_cache_block_t oldblock = XLAT_BLOCK_FOR_CODE(p);
        assert( oldblock->active );
        xlat_new_create_ptr->chain = p;
    } else {
        xlat_new_create_ptr->chain = NULL;
    }
    xlat_new_create_ptr->use_list = NULL;

    /* Point the LUT at the new block's code area and remember the entry */
    xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)] = 
        &xlat_new_create_ptr->code;
    xlat_new_create_ptr->lut_entry = xlat_lut[XLAT_LUT_PAGE(address)] + XLAT_LUT_ENTRY(address);

    return xlat_new_create_ptr;
}
nkeynes@991
   434
nkeynes@991
   435
/**
 * Grow the block currently being created until it can hold at least
 * newSize bytes of code, absorbing following free/evicted blocks, or
 * migrating the partial block to the front of the cache when the end
 * sentinel is reached. Returns the (possibly relocated) creation block.
 */
xlat_cache_block_t xlat_extend_block( uint32_t newSize )
{
    /* Can't safely relocate a block that other code already links into */
    assert( xlat_new_create_ptr->use_list == NULL );
    while( xlat_new_create_ptr->size < newSize ) {
        if( xlat_new_cache_ptr->size == 0 ) {
            /* Migrate to the front of the cache to keep it contiguous */
            xlat_new_create_ptr->active = 0;
            sh4ptr_t olddata = xlat_new_create_ptr->code;
            int oldsize = xlat_new_create_ptr->size;
            int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
            void **lut_entry = xlat_new_create_ptr->lut_entry;
            void *chain = xlat_new_create_ptr->chain;
            int allocation = (int)-sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = xlat_new_cache;
            /* Evict and coalesce from the top until there's enough room */
            do {
                if( xlat_new_cache_ptr->active ) {
                    xlat_promote_to_temp_space( xlat_new_cache_ptr );
                }
                allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
                xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
            } while( allocation < size );
            /* Re-establish the creation block at the top of the cache */
            xlat_new_create_ptr = xlat_new_cache;
            xlat_new_create_ptr->active = 1;
            xlat_new_create_ptr->size = allocation;
            xlat_new_create_ptr->lut_entry = lut_entry;
            xlat_new_create_ptr->chain = chain;
            xlat_new_create_ptr->use_list = NULL;
            *lut_entry = &xlat_new_create_ptr->code;
            /* memmove: old and new locations may overlap within the cache */
            memmove( xlat_new_create_ptr->code, olddata, oldsize );
        } else {
            /* Absorb the next block (evicting its occupant if live) */
            if( xlat_new_cache_ptr->active ) {
                xlat_promote_to_temp_space( xlat_new_cache_ptr );
            }
            xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
        }
    }
    return xlat_new_create_ptr;

}
nkeynes@991
   475
nkeynes@991
   476
/**
 * Finalize the block under construction: mark every LUT entry covered by
 * the translated source range as USED (so writes there trigger
 * invalidation), then trim the block down to its final code size.
 *
 * @param destsize final size of the generated native code, in bytes
 * @param srcsize  length of the translated SH4 source region, in bytes
 *                 (2 bytes per LUT entry)
 */
void xlat_commit_block( uint32_t destsize, uint32_t srcsize )
{
    void **ptr = xlat_new_create_ptr->lut_entry;
    void **endptr = ptr + (srcsize>>1);
    while( ptr < endptr ) {
        if( *ptr == NULL ) {
            *ptr = XLAT_LUT_ENTRY_USED;
        }
        ptr++;
    }

    /* Return the unused tail of the block to the free pool */
    xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
}
nkeynes@991
   489
nkeynes@991
   490
/**
 * Debug walk of one cache region: verify the end sentinel, that every
 * block header is sane, that the chain lands exactly on the sentinel, and
 * that the region's allocation pointer refers to a real block.
 *
 * @param cache start of the region
 * @param ptr   the region's current allocation pointer
 * @param size  total region size in bytes
 */
void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
{
    int foundptr = 0;
    xlat_cache_block_t tail = 
        (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));

    /* Sentinel block: active=1, size=0 */
    assert( tail->active == 1 );
    assert( tail->size == 0 ); 
    while( cache < tail ) {
        assert( cache->active >= 0 && cache->active <= 2 );
        assert( cache->size >= 0 && cache->size < size );
        if( cache == ptr ) {
            foundptr = 1;
        }
        cache = NEXT(cache);
    }
    assert( cache == tail );
    assert( foundptr == 1 || tail == ptr );
}
nkeynes@991
   509
nkeynes@1091
   510
/**
nkeynes@1175
   511
 * Perform a reverse lookup to determine the SH4 address corresponding to
nkeynes@1175
   512
 * the start of the code block containing ptr. This is _slow_ - it does a
nkeynes@1175
   513
 * linear scan of the lookup table to find this.
nkeynes@1175
   514
 *
nkeynes@1175
   515
 * If the pointer cannot be found in any live block, returns -1 (as this
nkeynes@1175
   516
 * is not a legal PC)
nkeynes@1175
   517
 */
nkeynes@1175
   518
sh4addr_t xlat_get_address( unsigned char *ptr )
{
    int i,j;
    /* Brute-force scan of every allocated LUT page and entry */
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( j=0; j<XLAT_LUT_PAGE_ENTRIES; j++ ) {
                void *entry = page[j];
                /* Skip EMPTY (0) and bare USED (1) markers */
                if( ((uintptr_t)entry) > (uintptr_t)XLAT_LUT_ENTRY_USED ) {
                    xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(entry);
                    if( ptr >= block->code && ptr < block->code + block->size) {
                        /* Found it: reconstruct the SH4 address from the
                         * page and entry indices */
                        return (i<<13) | (j<<1);
                    }
                }
            }
        }
    }
    return -1;
}
nkeynes@1175
   538
nkeynes@1175
   539
/**
nkeynes@1091
   540
 * Sanity check that the given pointer is at least contained in one of cache
nkeynes@1091
   541
 * regions, and has a sane-ish size. We don't do a full region walk atm.
nkeynes@1091
   542
 */
nkeynes@1091
   543
gboolean xlat_is_code_pointer( void *p )
nkeynes@1091
   544
{
nkeynes@1091
   545
    char *region;
nkeynes@1091
   546
    uintptr_t region_size;
nkeynes@1091
   547
nkeynes@1091
   548
    xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
nkeynes@1091
   549
    if( (((char *)block) - (char *)xlat_new_cache) < XLAT_NEW_CACHE_SIZE ) {
nkeynes@1091
   550
         /* Pointer is in new cache */
nkeynes@1091
   551
        region = (char *)xlat_new_cache;
nkeynes@1091
   552
        region_size = XLAT_NEW_CACHE_SIZE;
nkeynes@1091
   553
    }
nkeynes@1091
   554
#ifdef XLAT_GENERATIONAL_CACHE
nkeynes@1091
   555
    else if( (((char *)block) - (char *)xlat_temp_cache) < XLAT_TEMP_CACHE_SIZE ) {
nkeynes@1091
   556
         /* Pointer is in temp cache */
nkeynes@1091
   557
        region = (char *)xlat_temp_cache;
nkeynes@1091
   558
        region_size = XLAT_TEMP_CACHE_SIZE;
nkeynes@1091
   559
    } else if( (((char *)block) - (char *)xlat_odl_cache) < XLAT_OLD_CACHE_SIZE ) {
nkeynes@1091
   560
        /* Pointer is in old cache */
nkeynes@1091
   561
        region = (char *)xlat_old_cache;
nkeynes@1091
   562
        region_size = XLAT_OLD_CACHE_SIZE;
nkeynes@1091
   563
    }
nkeynes@1091
   564
#endif
nkeynes@1091
   565
    else {
nkeynes@1091
   566
        /* Not a valid cache pointer */
nkeynes@1091
   567
        return FALSE;
nkeynes@1091
   568
    }
nkeynes@1091
   569
nkeynes@1091
   570
    /* Make sure the whole block is in the region */
nkeynes@1091
   571
    if( (((char *)p) - region) >= region_size ||
nkeynes@1091
   572
        (((char *)(NEXT(block))) - region) >= region_size )
nkeynes@1091
   573
        return FALSE;
nkeynes@1091
   574
    return TRUE;
nkeynes@1091
   575
}
nkeynes@1091
   576
nkeynes@991
   577
/**
 * Debug helper: run the cache-walk integrity check over every configured
 * cache region.
 */
void xlat_check_integrity( )
{
    xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
#endif
}
nkeynes@991
   585
nkeynes@1182
   586
unsigned int xlat_get_active_block_count()
nkeynes@1182
   587
{
nkeynes@1182
   588
    unsigned int count = 0;
nkeynes@1182
   589
    xlat_cache_block_t ptr = xlat_new_cache;
nkeynes@1182
   590
    while( ptr->size != 0 ) {
nkeynes@1182
   591
        if( ptr->active != 0 ) {
nkeynes@1182
   592
            count++;
nkeynes@1182
   593
        }
nkeynes@1182
   594
        ptr = NEXT(ptr);
nkeynes@1182
   595
    }
nkeynes@1182
   596
    return count;
nkeynes@1182
   597
}
nkeynes@1182
   598
nkeynes@1188
   599
unsigned int xlat_get_active_blocks( struct xlat_block_ref *blocks, unsigned int size )
nkeynes@1182
   600
{
nkeynes@1182
   601
    unsigned int count = 0;
nkeynes@1182
   602
    xlat_cache_block_t ptr = xlat_new_cache;
nkeynes@1182
   603
    while( ptr->size != 0 ) {
nkeynes@1182
   604
        if( ptr->active != 0 ) {
nkeynes@1182
   605
            blocks[count].block = ptr;
nkeynes@1188
   606
            blocks[count].pc = 0;
nkeynes@1182
   607
            count++;
nkeynes@1182
   608
        }
nkeynes@1182
   609
        if( count >= size )
nkeynes@1182
   610
            break;
nkeynes@1182
   611
        ptr = NEXT(ptr);
nkeynes@1182
   612
    }
nkeynes@1182
   613
    return count;
nkeynes@1182
   614
}
nkeynes@1182
   615
nkeynes@1189
   616
/**
 * Reverse-map SH4 PCs onto the given block references by scanning the LUT:
 * for each live entry, find the matching block (and any blocks chained
 * behind it) in blocks[] and record the entry's SH4 address.
 * NOTE(review): after following a chain link the scan restarts with k = 0,
 * but the for-loop increments k before the next comparison, so blocks[0]
 * is skipped on the rescan — confirm whether that is intentional.
 */
static void xlat_get_block_pcs( struct xlat_block_ref *blocks, unsigned int size )
{
    unsigned i;
    for( i=0; i<XLAT_LUT_PAGES;i ++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( unsigned j=0; j < XLAT_LUT_PAGE_ENTRIES; j++ ) {
                /* Mask off the EMPTY/USED tag bits */
                void *code = (void *)(((uintptr_t)(page[j])) & (~((uintptr_t)0x03)));
                if( code != NULL ) {
                    xlat_cache_block_t ptr = XLAT_BLOCK_FOR_CODE(code);
                    sh4addr_t pc = XLAT_ADDR_FROM_ENTRY(i,j);
                    for( unsigned k=0; k<size; k++ ) {
                        if( blocks[k].block == ptr ) {
                            blocks[k].pc = pc;
                            /* Follow the chain so older translations of the
                             * same address also get their PC recorded */
                            ptr = ptr->chain;
                            if( ptr == NULL )
                                break;
                            else {
                                ptr = XLAT_BLOCK_FOR_CODE(ptr);
                                k = 0;
                            }
                        }
                    }
                }
            }
        }
    }
}
nkeynes@1182
   644
nkeynes@1182
   645
static int xlat_compare_active_field( const void *a, const void *b )
nkeynes@1182
   646
{
nkeynes@1188
   647
    const struct xlat_block_ref *ptra = (const struct xlat_block_ref *)a;
nkeynes@1188
   648
    const struct xlat_block_ref *ptrb = (const struct xlat_block_ref *)b;
nkeynes@1182
   649
    return ptrb->block->active - ptra->block->active;
nkeynes@1182
   650
}
nkeynes@1182
   651
nkeynes@1188
   652
/**
 * Collect the topN most-active translated blocks (with their SH4 PCs) into
 * outblocks, sorted by descending activity.
 *
 * Fixed: when no blocks are active the old code declared a zero-length VLA
 * (`struct xlat_block_ref blocks[0]`), which is undefined behavior in C;
 * we now return 0 early. Also removed the unused local `i`.
 *
 * @param outblocks output array with room for at least topN entries
 * @param topN      maximum number of entries to return
 * @return number of entries actually written (<= topN)
 */
unsigned int xlat_get_cache_blocks_by_activity( xlat_block_ref_t outblocks, size_t topN )
{
    int count = xlat_get_active_block_count();
    if( count == 0 ) {
        return 0; /* avoid a zero-length VLA below */
    }

    struct xlat_block_ref blocks[count];
    xlat_get_active_blocks(blocks, count);
    xlat_get_block_pcs(blocks,count);
    qsort(blocks, count, sizeof(struct xlat_block_ref), xlat_compare_active_field);

    if( topN > count )
        topN = count;
    memcpy(outblocks, blocks, topN*sizeof(struct xlat_block_ref));
    return topN;
}
.