filename    src/xlat/xltcache.c
changeset   1214:49152b3d8b75
prev        1195:072131b61d2a
next        1263:b3de98d19faf
author      nkeynes
date        Sat Mar 03 15:52:59 2012 +1000
permissions -rw-r--r--
last change Swap between run + pause icons when pressed
/**
 * $Id$
 *
 * Translation cache management. This part is architecture independent.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <assert.h>

#include "dreamcast.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "xlat/xltcache.h"
#include "x86dasm/x86dasm.h"

#define XLAT_LUT_PAGE_BITS 12
#define XLAT_LUT_TOTAL_BITS 28
#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)

#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))

#define XLAT_LUT_ENTRY_EMPTY (void *)0
#define XLAT_LUT_ENTRY_USED  (void *)1

#define XLAT_ADDR_FROM_ENTRY(pagenum,entrynum) ((((pagenum)&0xFFFF)<<13)|(((entrynum)<<1)&0x1FFE))

#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
#define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
#define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)
#define IS_ENTRY_CONTINUATION(ent) (((uintptr_t)ent) & ((uintptr_t)XLAT_LUT_ENTRY_USED))
#define IS_FIRST_ENTRY_IN_PAGE(addr) (((addr)&0x1FFE) == 0)
#define XLAT_CODE_ADDR(ent) ((void *)(((uintptr_t)ent) & (~((uintptr_t)0x03))))
#define XLAT_BLOCK_FOR_LUT_ENTRY(ent) XLAT_BLOCK_FOR_CODE(XLAT_CODE_ADDR(ent))
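
/* Illustrative helper (not part of the original file): a sketch of how an SH4
 * address is decomposed by the macros above, and how the low bit of a LUT slot
 * doubles as the "continuation" tag while the upper bits hold the native code
 * pointer. The function name is hypothetical and is only here as documentation. */
static inline void example_decode_lut_slot( sh4addr_t addr, void *lut_slot )
{
    int page  = XLAT_LUT_PAGE(addr);   /* which 8KB SH4 page (address bits 13..28) */
    int entry = XLAT_LUT_ENTRY(addr);  /* which 16-bit instruction word within that page */
    void *code = XLAT_CODE_ADDR(lut_slot);                 /* native code, tag bits masked off */
    int mid_block = IS_ENTRY_CONTINUATION(lut_slot) != 0;  /* word is covered mid-block */
    (void)page; (void)entry; (void)code; (void)mid_block;
}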

#define MIN_BLOCK_SIZE 32
#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)

#define BLOCK_INACTIVE 0
#define BLOCK_ACTIVE 1
#define BLOCK_USED 2

xlat_cache_block_t xlat_new_cache;
xlat_cache_block_t xlat_new_cache_ptr;
xlat_cache_block_t xlat_new_create_ptr;

#ifdef XLAT_GENERATIONAL_CACHE
xlat_cache_block_t xlat_temp_cache;
xlat_cache_block_t xlat_temp_cache_ptr;
xlat_cache_block_t xlat_old_cache;
xlat_cache_block_t xlat_old_cache_ptr;
#endif

static void **xlat_lut[XLAT_LUT_PAGES];
static gboolean xlat_initialized = FALSE;
static xlat_target_fns_t xlat_target = NULL;

void xlat_cache_init(void)
{
    if( !xlat_initialized ) {
        xlat_initialized = TRUE;
        xlat_new_cache = (xlat_cache_block_t)mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_new_cache_ptr = xlat_new_cache;
        xlat_new_create_ptr = xlat_new_cache;
#ifdef XLAT_GENERATIONAL_CACHE
        xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_temp_cache_ptr = xlat_temp_cache;
        xlat_old_cache_ptr = xlat_old_cache;
#endif
//        xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
//                MAP_PRIVATE|MAP_ANON, -1, 0);
        memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
    }
    xlat_flush_cache();
}

void xlat_set_target_fns( xlat_target_fns_t target )
{
    xlat_target = target;
}
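
/* Illustrative sketch (not part of the original file, kept out of the build): the
 * start-up sequence this module's API suggests -- install the target backend's
 * callbacks (e.g. unlink_block, used by xlat_delete_block), then initialise the
 * cache, which maps the arenas and clears the lookup table. example_target_fns is
 * a hypothetical table supplied by the code generator. */
#if 0
static void example_xlat_startup( xlat_target_fns_t example_target_fns )
{
    xlat_set_target_fns( example_target_fns );
    xlat_cache_init();    /* also performs the initial xlat_flush_cache() */
}
#endif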

/**
 * Reset the cache structure to its default state
 */
void xlat_flush_cache()
{
    xlat_cache_block_t tmp;
    int i;
    xlat_new_cache_ptr = xlat_new_cache;
    xlat_new_cache_ptr->active = 0;
    xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_new_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_temp_cache_ptr->active = 0;
    xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_temp_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_old_cache_ptr->active = 0;
    xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_old_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#endif
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        if( xlat_lut[i] != NULL ) {
            memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
        }
    }
}

void xlat_delete_block( xlat_cache_block_t block )
{
    block->active = 0;
    *block->lut_entry = block->chain;
    if( block->use_list != NULL )
        xlat_target->unlink_block(block->use_list);
}

static void xlat_flush_page_by_lut( void **page )
{
    int i;
    for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
        if( IS_ENTRY_POINT(page[i]) ) {
            void *p = XLAT_CODE_ADDR(page[i]);
            do {
                xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
                xlat_delete_block(block);
                p = block->chain;
            } while( p != NULL );
        }
        page[i] = NULL;
    }
}

void FASTCALL xlat_invalidate_word( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( entry == 0 && IS_ENTRY_CONTINUATION(page[entry]) ) {
            /* First entry may be a delay-slot for the previous page */
            xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(addr-2)]);
        }
        if( page[entry] != NULL ) {
            xlat_flush_page_by_lut(page);
        }
    }
}

void FASTCALL xlat_invalidate_long( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( entry == 0 && IS_ENTRY_CONTINUATION(page[entry]) ) {
            /* First entry may be a delay-slot for the previous page */
            xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(addr-2)]);
        }
        if( *(uint64_t *)&page[entry] != 0 ) {
            xlat_flush_page_by_lut(page);
        }
    }
}

void FASTCALL xlat_invalidate_block( sh4addr_t address, size_t size )
{
    int i;
    int entry_count = size >> 1; // words
    uint32_t page_no = XLAT_LUT_PAGE(address);
    int entry = XLAT_LUT_ENTRY(address);

    if( entry == 0 && xlat_lut[page_no] != NULL && IS_ENTRY_CONTINUATION(xlat_lut[page_no][entry])) {
        /* First entry may be a delay-slot for the previous page */
        xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(address-2)]);
    }
    do {
        void **page = xlat_lut[page_no];
        int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
        if( entry_count < page_entries ) {
            page_entries = entry_count;
        }
        if( page != NULL ) {
            if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
                /* Overwriting the entire page anyway */
                xlat_flush_page_by_lut(page);
            } else {
                for( i=entry; i<entry+page_entries; i++ ) {
                    if( page[i] != NULL ) {
                        xlat_flush_page_by_lut(page);
                        break;
                    }
                }
            }
        }
        page_no ++;
        entry_count -= page_entries;
        entry = 0;
    } while( entry_count > 0 );
}
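
/* Illustrative sketch (not part of the original file, kept out of the build): how a
 * store or DMA path might route through the invalidation entry points above. The
 * wrapper name and its callers are hypothetical; the real hooks live in the SH4
 * memory core. */
#if 0
static void example_on_sh4_write( sh4addr_t addr, size_t bytes )
{
    if( bytes == 2 )
        xlat_invalidate_word( addr );         /* single 16-bit instruction word */
    else if( bytes == 4 )
        xlat_invalidate_long( addr );         /* 32-bit store */
    else
        xlat_invalidate_block( addr, bytes ); /* arbitrary range, e.g. DMA */
}
#endif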

void FASTCALL xlat_flush_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        xlat_flush_page_by_lut(page);
    }
}

void * FASTCALL xlat_get_code( sh4addr_t address )
{
    void *result = NULL;
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        result = XLAT_CODE_ADDR(page[XLAT_LUT_ENTRY(address)]);
    }
    return result;
}
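
/* Illustrative sketch (not part of the original file, kept out of the build): the
 * lookup-before-translate pattern this cache is designed for. example_translate()
 * is a hypothetical stand-in for the SH4 translator entry point, which would build
 * a new block via xlat_start_block()/xlat_commit_block(). */
#if 0
static void *example_get_or_translate( sh4addr_t pc )
{
    void *code = xlat_get_code( pc );   /* NULL if nothing is cached for pc */
    if( code == NULL )
        code = example_translate( pc );
    return code;
}
#endif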

xlat_recovery_record_t xlat_get_pre_recovery( void *code, void *native_pc )
{
    if( code != NULL ) {
        uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
        xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code);
        uint32_t count = block->recover_table_size;
        xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
        uint32_t posn;
        for( posn = 1; posn < count; posn++ ) {
            if( records[posn].xlat_offset >= pc_offset ) {
                return &records[posn-1];
            }
        }
        return &records[count-1];
    }
    return NULL;
}
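
/* Illustrative sketch (not part of the original file, kept out of the build):
 * translating a host-side PC (for example from a memory-fault handler) back to the
 * last recovery checkpoint recorded before it. The handler framing is hypothetical;
 * only the two cache calls are real. */
#if 0
static void example_recover_state( sh4addr_t block_pc, void *faulting_host_pc )
{
    void *code = xlat_get_code( block_pc );  /* start of the translated block */
    xlat_recovery_record_t rec = xlat_get_pre_recovery( code, faulting_host_pc );
    if( rec != NULL ) {
        /* rec is the last recovery record before faulting_host_pc; the translator
         * uses it to restore a consistent SH4 state. */
    }
}
#endif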

static void **xlat_get_lut_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];

    /* Allocate and clear the LUT page on first use */
    if( page == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] = page =
            (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( page, 0, XLAT_LUT_PAGE_SIZE );
    }

    return page;
}

void ** FASTCALL xlat_get_lut_entry( sh4addr_t address )
{
    void **page = xlat_get_lut_page(address);
    return &page[XLAT_LUT_ENTRY(address)];
}



uint32_t FASTCALL xlat_get_block_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    return xlt->size;
}

uint32_t FASTCALL xlat_get_code_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    if( xlt->recover_table_offset == 0 ) {
        return xlt->size;
    } else {
        return xlt->recover_table_offset;
    }
}

/**
 * Cut the specified block so that it has the given size, with the remaining data
 * forming a new free block. If the free block would be less than the minimum size,
 * the cut is not performed.
 * @return the next block after the (possibly cut) block.
 */
static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
{
    cutsize = (cutsize + 3) & 0xFFFFFFFC; // force word alignment
    assert( cutsize <= block->size );
    if( block->size > cutsize + MIN_TOTAL_SIZE ) {
        int oldsize = block->size;
        block->size = cutsize;
        xlat_cache_block_t next = NEXT(block);
        next->active = 0;
        next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
        return next;
    } else {
        return NEXT(block);
    }
}
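
/* Worked example (not part of the original file; sizes are illustrative): cutting a
 * block whose body is 256 bytes down to a 101-byte translation. cutsize is first
 * rounded up to 104 for word alignment. If 256 > 104 + MIN_TOTAL_SIZE, the block is
 * trimmed to 104 bytes and a new inactive block header is written immediately after
 * it, owning the remaining 256 - 104 - sizeof(struct xlat_cache_block) bytes;
 * otherwise the block keeps all 256 bytes and NEXT(block) is returned unchanged. */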

#ifdef XLAT_GENERATIONAL_CACHE
/**
 * Promote a block in temp space (or elsewhere for that matter) to old space.
 *
 * @param block to promote.
 */
static void xlat_promote_to_old_space( xlat_cache_block_t block )
{
    int allocation = (int)-sizeof(struct xlat_cache_block);
    int size = block->size;
    xlat_cache_block_t curr = xlat_old_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_old_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_old_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_old_cache_ptr->size == 0 ) {
        xlat_old_cache_ptr = xlat_old_cache;
    }
}

/**
 * Similarly to the above method, promotes a block to temp space.
 * TODO: Try to combine these - they're nearly identical
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    int size = block->size;
    int allocation = (int)-sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = xlat_temp_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        if( curr->active == BLOCK_USED ) {
            xlat_promote_to_old_space( curr );
        } else if( curr->active == BLOCK_ACTIVE ) {
            // Active but not used, release block
            *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03);
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_temp_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_temp_cache_ptr->size == 0 ) {
        xlat_temp_cache_ptr = xlat_temp_cache;
    }

}
#else
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    *block->lut_entry = block->chain;
    xlat_delete_block(block);
}
#endif

/**
 * Returns the next block in the new cache list that can be written to by the
 * translator. If the next block is active, it is evicted first.
 */
xlat_cache_block_t xlat_start_block( sh4addr_t address )
{
    if( xlat_new_cache_ptr->size == 0 ) {
        xlat_new_cache_ptr = xlat_new_cache;
    }

    if( xlat_new_cache_ptr->active ) {
        xlat_promote_to_temp_space( xlat_new_cache_ptr );
    }
    xlat_new_create_ptr = xlat_new_cache_ptr;
    xlat_new_create_ptr->active = 1;
    xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);

    /* Add the LUT entry for the block */
    void **p = xlat_get_lut_entry(address);
    void *entry = *p;
    if( IS_ENTRY_POINT(entry) ) {
        xlat_cache_block_t oldblock = XLAT_BLOCK_FOR_LUT_ENTRY(entry);
        assert( oldblock->active );
        xlat_new_create_ptr->chain = XLAT_CODE_ADDR(entry);
    } else {
        xlat_new_create_ptr->chain = NULL;
    }
    xlat_new_create_ptr->use_list = NULL;

    *p = &xlat_new_create_ptr->code;
    if( IS_ENTRY_CONTINUATION(entry) ) {
        *((uintptr_t *)p) |= (uintptr_t)XLAT_LUT_ENTRY_USED;
    }
    xlat_new_create_ptr->lut_entry = p;

    return xlat_new_create_ptr;
}

xlat_cache_block_t xlat_extend_block( uint32_t newSize )
{
    assert( xlat_new_create_ptr->use_list == NULL );
    while( xlat_new_create_ptr->size < newSize ) {
        if( xlat_new_cache_ptr->size == 0 ) {
            /* Migrate to the front of the cache to keep it contiguous */
            xlat_new_create_ptr->active = 0;
            sh4ptr_t olddata = xlat_new_create_ptr->code;
            int oldsize = xlat_new_create_ptr->size;
            int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
            void **lut_entry = xlat_new_create_ptr->lut_entry;
            void *chain = xlat_new_create_ptr->chain;
            int allocation = (int)-sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = xlat_new_cache;
            do {
                if( xlat_new_cache_ptr->active ) {
                    xlat_promote_to_temp_space( xlat_new_cache_ptr );
                }
                allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
                xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
            } while( allocation < size );
            xlat_new_create_ptr = xlat_new_cache;
            xlat_new_create_ptr->active = 1;
            xlat_new_create_ptr->size = allocation;
            xlat_new_create_ptr->lut_entry = lut_entry;
            xlat_new_create_ptr->chain = chain;
            xlat_new_create_ptr->use_list = NULL;
            *lut_entry = &xlat_new_create_ptr->code;
            memmove( xlat_new_create_ptr->code, olddata, oldsize );
        } else {
            if( xlat_new_cache_ptr->active ) {
                xlat_promote_to_temp_space( xlat_new_cache_ptr );
            }
            xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
        }
    }
    return xlat_new_create_ptr;

}

void xlat_commit_block( uint32_t destsize, sh4addr_t startpc, sh4addr_t endpc )
{
    void **entry = xlat_get_lut_entry(startpc+2);
    /* assume main entry has already been set at this point */

    for( sh4addr_t pc = startpc+2; pc < endpc; pc += 2 ) {
        if( XLAT_LUT_ENTRY(pc) == 0 )
            entry = xlat_get_lut_entry(pc);
        *((uintptr_t *)entry) |= (uintptr_t)XLAT_LUT_ENTRY_USED;
        entry++;
    }

    xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
}
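
/* Illustrative sketch (not part of the original file, kept out of the build): the
 * intended lifecycle of a translated block. example_emit() is a hypothetical
 * stand-in for the code generator, assumed to return the number of bytes it needs;
 * the real emitter lives in the SH4 translator. */
#if 0
static void *example_install_block( sh4addr_t startpc, sh4addr_t endpc )
{
    xlat_cache_block_t block = xlat_start_block( startpc );   /* reserve space + LUT slot */
    uint32_t used = example_emit( startpc, endpc, block->code, block->size );
    if( used > block->size ) {
        block = xlat_extend_block( used );                     /* grow; the block may move */
        used = example_emit( startpc, endpc, block->code, block->size );
    }
    xlat_commit_block( used, startpc, endpc );  /* trim to size, tag the covered SH4 words */
    return block->code;
}
#endif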

void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
{
    int foundptr = 0;
    xlat_cache_block_t tail =
        (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));

    assert( tail->active == 1 );
    assert( tail->size == 0 );
    while( cache < tail ) {
        assert( cache->active >= 0 && cache->active <= 2 );
        assert( cache->size >= 0 && cache->size < size );
        if( cache == ptr ) {
            foundptr = 1;
        }
        cache = NEXT(cache);
    }
    assert( cache == tail );
    assert( foundptr == 1 || tail == ptr );
}

/**
 * Perform a reverse lookup to determine the SH4 address corresponding to
 * the start of the code block containing ptr. This is _slow_ - it does a
 * linear scan of the lookup table to find this.
 *
 * If the pointer cannot be found in any live block, returns -1 (as this
 * is not a legal PC)
 */
sh4addr_t xlat_get_address( unsigned char *ptr )
{
    int i,j;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( j=0; j<XLAT_LUT_PAGE_ENTRIES; j++ ) {
                void *entry = page[j];
                if( ((uintptr_t)entry) > (uintptr_t)XLAT_LUT_ENTRY_USED ) {
                    xlat_cache_block_t block = XLAT_BLOCK_FOR_LUT_ENTRY(entry);
                    if( ptr >= block->code && ptr < block->code + block->size) {
                        /* Found it */
                        return (i<<13) | (j<<1);
                    }
                }
            }
        }
    }
    return -1;
}

/**
 * Sanity check that the given pointer is at least contained in one of the cache
 * regions, and has a sane-ish size. We don't do a full region walk atm.
 */
gboolean xlat_is_code_pointer( void *p )
{
    char *region;
    uintptr_t region_size;

    xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
    if( (((char *)block) - (char *)xlat_new_cache) < XLAT_NEW_CACHE_SIZE ) {
        /* Pointer is in new cache */
        region = (char *)xlat_new_cache;
        region_size = XLAT_NEW_CACHE_SIZE;
    }
#ifdef XLAT_GENERATIONAL_CACHE
    else if( (((char *)block) - (char *)xlat_temp_cache) < XLAT_TEMP_CACHE_SIZE ) {
        /* Pointer is in temp cache */
        region = (char *)xlat_temp_cache;
        region_size = XLAT_TEMP_CACHE_SIZE;
    } else if( (((char *)block) - (char *)xlat_old_cache) < XLAT_OLD_CACHE_SIZE ) {
        /* Pointer is in old cache */
        region = (char *)xlat_old_cache;
        region_size = XLAT_OLD_CACHE_SIZE;
    }
#endif
    else {
        /* Not a valid cache pointer */
        return FALSE;
    }

    /* Make sure the whole block is in the region */
    if( (((char *)p) - region) >= region_size ||
        (((char *)(NEXT(block))) - region) >= region_size )
        return FALSE;
    return TRUE;
}

void xlat_check_integrity( )
{
    xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
#endif
}

unsigned int xlat_get_active_block_count()
{
    unsigned int count = 0;
    xlat_cache_block_t ptr = xlat_new_cache;
    while( ptr->size != 0 ) {
        if( ptr->active != 0 ) {
            count++;
        }
        ptr = NEXT(ptr);
    }
    return count;
}

unsigned int xlat_get_active_blocks( struct xlat_block_ref *blocks, unsigned int size )
{
    unsigned int count = 0;
    xlat_cache_block_t ptr = xlat_new_cache;
    while( ptr->size != 0 ) {
        if( ptr->active != 0 ) {
            blocks[count].block = ptr;
            blocks[count].pc = 0;
            count++;
        }
        if( count >= size )
            break;
        ptr = NEXT(ptr);
    }
    return count;
}

static void xlat_get_block_pcs( struct xlat_block_ref *blocks, unsigned int size )
{
    unsigned i;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( unsigned j=0; j < XLAT_LUT_PAGE_ENTRIES; j++ ) {
                void *code = XLAT_CODE_ADDR(page[j]);
                if( code != NULL ) {
                    xlat_cache_block_t ptr = XLAT_BLOCK_FOR_CODE(code);
                    sh4addr_t pc = XLAT_ADDR_FROM_ENTRY(i,j);
                    for( unsigned k=0; k<size; k++ ) {
                        if( blocks[k].block == ptr ) {
                            blocks[k].pc = pc;
                            ptr = ptr->chain;
                            if( ptr == NULL )
                                break;
                            else {
                                ptr = XLAT_BLOCK_FOR_CODE(ptr);
                                k = 0;
                            }
                        }
                    }
                }
            }
        }
    }
}

static int xlat_compare_active_field( const void *a, const void *b )
{
    const struct xlat_block_ref *ptra = (const struct xlat_block_ref *)a;
    const struct xlat_block_ref *ptrb = (const struct xlat_block_ref *)b;
    return ptrb->block->active - ptra->block->active;
}

unsigned int xlat_get_cache_blocks_by_activity( xlat_block_ref_t outblocks, size_t topN )
{
    int i=0;
    int count = xlat_get_active_block_count();

    struct xlat_block_ref blocks[count];
    xlat_get_active_blocks(blocks, count);
    xlat_get_block_pcs(blocks,count);
    qsort(blocks, count, sizeof(struct xlat_block_ref), xlat_compare_active_field);

    if( topN > count )
        topN = count;
    memcpy(outblocks, blocks, topN*sizeof(struct xlat_block_ref));
    return topN;
}
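
/* Illustrative sketch (not part of the original file, kept out of the build): using
 * the activity query above to dump the hottest blocks for a profiling view. The
 * array size and output format are arbitrary examples. */
#if 0
static void example_dump_hot_blocks( void )
{
    struct xlat_block_ref hot[10];
    unsigned int n = xlat_get_cache_blocks_by_activity( hot, 10 );
    for( unsigned int i = 0; i < n; i++ ) {
        printf( "%08X: %u bytes\n", (unsigned int)hot[i].pc, (unsigned int)hot[i].block->size );
    }
}
#endif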