Search
lxdream.org :: lxdream/src/xlat/xltcache.c
lxdream 0.9.1
released Jun 29
Download Now
filename src/xlat/xltcache.c
changeset 1195:072131b61d2a
prev1189:1540105786c8
next1214:49152b3d8b75
author nkeynes
date Sun Feb 12 16:30:26 2012 +1000 (12 years ago)
permissions -rw-r--r--
last change Add -Werror for mregparm check, so it actually fails if mregparm isn't
accepted
file annotate diff log raw
nkeynes@991
     1
/**
nkeynes@991
     2
 * $Id$
nkeynes@991
     3
 * 
nkeynes@991
     4
 * Translation cache management. This part is architecture independent.
nkeynes@991
     5
 *
nkeynes@991
     6
 * Copyright (c) 2005 Nathan Keynes.
nkeynes@991
     7
 *
nkeynes@991
     8
 * This program is free software; you can redistribute it and/or modify
nkeynes@991
     9
 * it under the terms of the GNU General Public License as published by
nkeynes@991
    10
 * the Free Software Foundation; either version 2 of the License, or
nkeynes@991
    11
 * (at your option) any later version.
nkeynes@991
    12
 *
nkeynes@991
    13
 * This program is distributed in the hope that it will be useful,
nkeynes@991
    14
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
nkeynes@991
    15
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
nkeynes@991
    16
 * GNU General Public License for more details.
nkeynes@991
    17
 */
nkeynes@991
    18
nkeynes@991
    19
#include <sys/types.h>
nkeynes@991
    20
#include <sys/mman.h>
nkeynes@991
    21
#include <assert.h>
nkeynes@991
    22
nkeynes@991
    23
#include "dreamcast.h"
nkeynes@991
    24
#include "sh4/sh4core.h"
nkeynes@1186
    25
#include "sh4/sh4trans.h"
nkeynes@991
    26
#include "xlat/xltcache.h"
nkeynes@991
    27
#include "x86dasm/x86dasm.h"
nkeynes@991
    28
nkeynes@991
    29
#define XLAT_LUT_PAGE_BITS 12
nkeynes@991
    30
#define XLAT_LUT_TOTAL_BITS 28
nkeynes@991
    31
#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
nkeynes@991
    32
#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)
nkeynes@991
    33
nkeynes@991
    34
#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
nkeynes@991
    35
#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
nkeynes@991
    36
#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))
nkeynes@991
    37
nkeynes@991
    38
#define XLAT_LUT_ENTRY_EMPTY (void *)0
nkeynes@991
    39
#define XLAT_LUT_ENTRY_USED  (void *)1
nkeynes@991
    40
nkeynes@1182
    41
#define XLAT_ADDR_FROM_ENTRY(pagenum,entrynum) ((((pagenum)&0xFFFF)<<13)|(((entrynum)<<1)&0x1FFE))
nkeynes@1182
    42
nkeynes@991
    43
#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
nkeynes@991
    44
#define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
nkeynes@991
    45
#define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)
nkeynes@1195
    46
#define IS_ENTRY_CONTINUATION(ent) (((uintptr_t)ent) & ((uintptr_t)XLAT_LUT_ENTRY_USED))
nkeynes@1195
    47
#define IS_FIRST_ENTRY_IN_PAGE(addr) (((addr)&0x1FFE) == 0)
nkeynes@1195
    48
#define XLAT_CODE_ADDR(ent) ((void *)(((uintptr_t)ent) & (~((uintptr_t)0x03))))
nkeynes@1195
    49
#define XLAT_BLOCK_FOR_LUT_ENTRY(ent) XLAT_BLOCK_FOR_CODE(XLAT_CODE_ADDR(ent))
nkeynes@1195
    50
nkeynes@991
    51
nkeynes@991
    52
#define MIN_BLOCK_SIZE 32
nkeynes@991
    53
#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)
nkeynes@991
    54
nkeynes@991
    55
#define BLOCK_INACTIVE 0
nkeynes@991
    56
#define BLOCK_ACTIVE 1
nkeynes@991
    57
#define BLOCK_USED 2
nkeynes@991
    58
nkeynes@991
    59
xlat_cache_block_t xlat_new_cache;
nkeynes@991
    60
xlat_cache_block_t xlat_new_cache_ptr;
nkeynes@991
    61
xlat_cache_block_t xlat_new_create_ptr;
nkeynes@991
    62
nkeynes@991
    63
#ifdef XLAT_GENERATIONAL_CACHE
nkeynes@991
    64
xlat_cache_block_t xlat_temp_cache;
nkeynes@991
    65
xlat_cache_block_t xlat_temp_cache_ptr;
nkeynes@991
    66
xlat_cache_block_t xlat_old_cache;
nkeynes@991
    67
xlat_cache_block_t xlat_old_cache_ptr;
nkeynes@991
    68
#endif
nkeynes@991
    69
nkeynes@991
    70
static void **xlat_lut[XLAT_LUT_PAGES];
nkeynes@991
    71
static gboolean xlat_initialized = FALSE;
nkeynes@991
    72
nkeynes@991
    73
void xlat_cache_init(void) 
nkeynes@991
    74
{
nkeynes@991
    75
    if( !xlat_initialized ) {
nkeynes@991
    76
        xlat_initialized = TRUE;
nkeynes@991
    77
        xlat_new_cache = (xlat_cache_block_t)mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
nkeynes@991
    78
                MAP_PRIVATE|MAP_ANON, -1, 0 );
nkeynes@991
    79
        xlat_new_cache_ptr = xlat_new_cache;
nkeynes@991
    80
        xlat_new_create_ptr = xlat_new_cache;
nkeynes@991
    81
#ifdef XLAT_GENERATIONAL_CACHE
nkeynes@991
    82
        xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
nkeynes@991
    83
                MAP_PRIVATE|MAP_ANON, -1, 0 );
nkeynes@991
    84
        xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
nkeynes@991
    85
                MAP_PRIVATE|MAP_ANON, -1, 0 );
nkeynes@991
    86
        xlat_temp_cache_ptr = xlat_temp_cache;
nkeynes@991
    87
        xlat_old_cache_ptr = xlat_old_cache;
nkeynes@991
    88
#endif
nkeynes@991
    89
//        xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
nkeynes@991
    90
//                MAP_PRIVATE|MAP_ANON, -1, 0);
nkeynes@991
    91
        memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
nkeynes@991
    92
    }
nkeynes@991
    93
    xlat_flush_cache();
nkeynes@991
    94
}
nkeynes@991
    95
nkeynes@991
    96
/**
 * Reset the cache structure to its default state: each cache region becomes
 * one large free block followed by a zero-size "active" sentinel block that
 * the allocator walks use as an end-of-region marker, and every allocated
 * LUT page is cleared.
 */
void xlat_flush_cache() 
{
    xlat_cache_block_t tmp;
    int i;
    /* Single free block covering the region, minus the two block headers
     * (its own and the sentinel's) */
    xlat_new_cache_ptr = xlat_new_cache;
    xlat_new_cache_ptr->active = 0;
    xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    /* Zero-size active sentinel terminates the region */
    tmp = NEXT(xlat_new_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#ifdef XLAT_GENERATIONAL_CACHE
    /* Same layout for the temp and old generations */
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_temp_cache_ptr->active = 0;
    xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_temp_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_old_cache_ptr->active = 0;
    xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_old_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#endif
    /* Clear every allocated LUT page; the pages themselves stay mapped */
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        if( xlat_lut[i] != NULL ) {
            memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
        }
    }
}
nkeynes@991
   129
nkeynes@1186
   130
/**
 * Delete a translated block: mark it inactive, replace its LUT entry with
 * the next (older) block in its chain, and unlink any other translated code
 * that branches into it via its use list.
 */
void xlat_delete_block( xlat_cache_block_t block )
{
    block->active = 0;
    *block->lut_entry = block->chain;
    sh4_translate_unlink_block( block->use_list );
}
nkeynes@1186
   136
nkeynes@991
   137
static void xlat_flush_page_by_lut( void **page )
nkeynes@991
   138
{
nkeynes@991
   139
    int i;
nkeynes@991
   140
    for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
nkeynes@991
   141
        if( IS_ENTRY_POINT(page[i]) ) {
nkeynes@1195
   142
            void *p = XLAT_CODE_ADDR(page[i]);
nkeynes@1149
   143
            do {
nkeynes@1149
   144
                xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
nkeynes@1186
   145
                xlat_delete_block(block);
nkeynes@1149
   146
                p = block->chain;
nkeynes@1149
   147
            } while( p != NULL );
nkeynes@991
   148
        }
nkeynes@991
   149
        page[i] = NULL;
nkeynes@991
   150
    }
nkeynes@991
   151
}
nkeynes@991
   152
nkeynes@991
   153
/**
 * Invalidate any translated code covering the 16-bit word at addr.
 * Flushing granularity is a whole LUT page.
 */
void FASTCALL xlat_invalidate_word( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page == NULL ) {
        return;
    }
    int entry = XLAT_LUT_ENTRY(addr);
    if( entry == 0 && IS_ENTRY_CONTINUATION(page[entry]) ) {
        /* First entry may be a delay-slot for the previous page */
        xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(addr-2)]);
    }
    if( page[entry] != NULL ) {
        xlat_flush_page_by_lut(page);
    }
}
nkeynes@991
   167
nkeynes@991
   168
/**
 * Invalidate any translated code covering the 32-bit long at addr (two
 * instruction words, hence two LUT entries).
 *
 * Fix: the old code tested both entries with a single read,
 * *(uint64_t *)&page[entry] != 0, which only covers two entries when
 * sizeof(void*)==4 - on LP64 it silently inspected just one entry - and
 * violated strict aliasing. Test the two slots explicitly instead.
 * Assumes addr is 4-byte aligned so entry is even and entry+1 stays within
 * the page (the same assumption the old 64-bit read made).
 */
void FASTCALL xlat_invalidate_long( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( entry == 0 && IS_ENTRY_CONTINUATION(page[entry]) ) {
            /* First entry may be a delay-slot for the previous page */
            xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(addr-2)]);
        }
        if( page[entry] != NULL || page[entry+1] != NULL ) {
            xlat_flush_page_by_lut(page);
        }
    }
}
nkeynes@991
   182
nkeynes@991
   183
/**
 * Invalidate all translated code in the range [address, address+size).
 * Any LUT page containing at least one live entry in the range is flushed
 * in its entirety.
 *
 * Fix: entry_count was decremented twice per iteration when the page was
 * mapped (once inside the if( page != NULL ) body and once after it),
 * which could terminate the loop before all affected pages were visited,
 * leaving stale translations live. Decrement exactly once per page.
 */
void FASTCALL xlat_invalidate_block( sh4addr_t address, size_t size )
{
    int i;
    int entry_count = size >> 1; // words;
    uint32_t page_no = XLAT_LUT_PAGE(address);
    int entry = XLAT_LUT_ENTRY(address);

    if( entry == 0 && xlat_lut[page_no] != NULL && IS_ENTRY_CONTINUATION(xlat_lut[page_no][entry])) {
        /* First entry may be a delay-slot for the previous page */
        xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(address-2)]);
    }
    do {
        void **page = xlat_lut[page_no];
        /* Number of LUT entries the range covers within this page */
        int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
        if( entry_count < page_entries ) {
            page_entries = entry_count;
        }
        if( page != NULL ) {
            if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
                /* Overwriting the entire page anyway */
                xlat_flush_page_by_lut(page);
            } else {
                for( i=entry; i<entry+page_entries; i++ ) {
                    if( page[i] != NULL ) {
                        xlat_flush_page_by_lut(page);
                        break;
                    }
                }
            }
        }
        page_no ++;
        entry_count -= page_entries;
        entry = 0;
    } while( entry_count > 0 );
}
nkeynes@991
   219
nkeynes@991
   220
/**
 * Flush all translated code for the LUT page containing the given address.
 * No-op if the page was never allocated.
 */
void FASTCALL xlat_flush_page( sh4addr_t address )
{
    void **lut_page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( lut_page == NULL ) {
        return;
    }
    xlat_flush_page_by_lut(lut_page);
}
nkeynes@991
   227
nkeynes@991
   228
/**
 * Look up the translated code entry point for the given SH4 address.
 * Returns NULL when no translation exists (XLAT_CODE_ADDR masks both the
 * EMPTY and USED sentinel values down to NULL).
 */
void * FASTCALL xlat_get_code( sh4addr_t address )
{
    void **lut_page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( lut_page == NULL ) {
        return NULL;
    }
    return XLAT_CODE_ADDR(lut_page[XLAT_LUT_ENTRY(address)]);
}
nkeynes@991
   237
nkeynes@991
   238
/**
 * Find the recovery record that applies to the given native PC within a
 * translated block: the last record whose xlat_offset lies strictly before
 * pc_offset. Returns NULL when code is NULL.
 *
 * NOTE(review): assumes the block has at least one recovery record
 * (count >= 1) - with count == 0 the final return would index records[-1].
 * Presumably callers only use this on blocks with a recover table; verify.
 */
xlat_recovery_record_t xlat_get_pre_recovery( void *code, void *native_pc )
{
    if( code != NULL ) {
        /* Byte offset of the native PC within the block's code */
        uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
        xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code);
        uint32_t count = block->recover_table_size;
        /* Recovery table lives at the tail of the code buffer */
        xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
        uint32_t posn;
        for( posn = 1; posn < count; posn++ ) {
        	if( records[posn].xlat_offset >= pc_offset ) {
        		return &records[posn-1];
        	}
        }
        return &records[count-1];
    }
    return NULL;	
}
nkeynes@991
   255
nkeynes@1195
   256
/**
 * Return the LUT page for the given SH4 address, allocating and zeroing it
 * on first use.
 *
 * Fix: the mmap() result was used unchecked - on failure the memset would
 * fault on MAP_FAILED. Assert success at the allocation site.
 */
static void **xlat_get_lut_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];

     /* Add the LUT entry for the block */
     if( page == NULL ) {
         page = (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                 MAP_PRIVATE|MAP_ANON, -1, 0 );
         assert( page != MAP_FAILED );
         xlat_lut[XLAT_LUT_PAGE(address)] = page;
         memset( page, 0, XLAT_LUT_PAGE_SIZE );
     }

     return page;
}
nkeynes@1195
   270
nkeynes@1195
   271
/**
 * Return a pointer to the LUT slot for the given SH4 address, allocating
 * the containing page on demand.
 */
void ** FASTCALL xlat_get_lut_entry( sh4addr_t address )
{
    return xlat_get_lut_page(address) + XLAT_LUT_ENTRY(address);
}
nkeynes@991
   276
nkeynes@991
   277
nkeynes@991
   278
nkeynes@991
   279
/**
 * Total allocated size (in bytes) of the block whose code starts at the
 * given pointer. The block header sits immediately before the code.
 */
uint32_t FASTCALL xlat_get_block_size( void *block )
{
    xlat_cache_block_t hdr = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    return hdr->size;
}
nkeynes@991
   284
nkeynes@991
   285
/**
 * Size (in bytes) of the executable code portion of a block - i.e. the
 * block size excluding the trailing recovery table, when one is present.
 */
uint32_t FASTCALL xlat_get_code_size( void *block )
{
    xlat_cache_block_t hdr = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    /* recover_table_offset == 0 means no recovery table: code fills the block */
    return hdr->recover_table_offset == 0 ? hdr->size : hdr->recover_table_offset;
}
nkeynes@991
   294
nkeynes@991
   295
/**
nkeynes@991
   296
 * Cut the specified block so that it has the given size, with the remaining data
nkeynes@991
   297
 * forming a new free block. If the free block would be less than the minimum size,
nkeynes@991
   298
 * the cut is not performed.
nkeynes@991
   299
 * @return the next block after the (possibly cut) block.
nkeynes@991
   300
 */
nkeynes@991
   301
static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
nkeynes@991
   302
{
nkeynes@991
   303
    cutsize = (cutsize + 3) & 0xFFFFFFFC; // force word alignment
nkeynes@991
   304
    assert( cutsize <= block->size );
nkeynes@991
   305
    if( block->size > cutsize + MIN_TOTAL_SIZE ) {
nkeynes@991
   306
        int oldsize = block->size;
nkeynes@991
   307
        block->size = cutsize;
nkeynes@991
   308
        xlat_cache_block_t next = NEXT(block);
nkeynes@991
   309
        next->active = 0;
nkeynes@991
   310
        next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
nkeynes@991
   311
        return next;
nkeynes@991
   312
    } else {
nkeynes@991
   313
        return NEXT(block);
nkeynes@991
   314
    }
nkeynes@991
   315
}
nkeynes@991
   316
nkeynes@991
   317
#ifdef XLAT_GENERATIONAL_CACHE
nkeynes@991
   318
/**
 * Promote a block in temp space (or elsewhere for that matter) to old space.
 *
 * The old cache is treated as a round-robin ring: starting at the current
 * allocation pointer, consecutive blocks are coalesced until enough space is
 * found, the block is copied in, its LUT entry repointed, and any excess is
 * trimmed back to a free block.
 *
 * @param block to promote.
 */
static void xlat_promote_to_old_space( xlat_cache_block_t block )
{
    /* Start at -header size so the first block's header isn't counted as payload */
    int allocation = (int)-sizeof(struct xlat_cache_block);
    int size = block->size;
    xlat_cache_block_t curr = xlat_old_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        /* Coalesce the next block into the candidate region */
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_old_cache;
        }
    } while(1);
    /* Claim the coalesced region and copy the block's header fields + code */
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    /* Repoint the LUT at the new copy before the old one is reused */
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    /* Trim excess back to a free block; wrap the ring pointer at the sentinel */
    xlat_old_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_old_cache_ptr->size == 0 ) {
        xlat_old_cache_ptr = xlat_old_cache;
    }
}
nkeynes@991
   360
nkeynes@991
   361
/**
 * Similarly to the above method, promotes a block to temp space.
 * TODO: Try to combine these - they're nearly identical
 *
 * Unlike old-space promotion, blocks displaced from the temp cache get a
 * second chance: blocks executed since their last promotion (BLOCK_USED)
 * are promoted onward to old space, while merely-active ones are dropped.
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    int size = block->size;
    /* Start at -header size so the first block's header isn't counted as payload */
    int allocation = (int)-sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = xlat_temp_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        if( curr->active == BLOCK_USED ) {
            /* Block was executed since last promotion - keep it alive in old space */
            xlat_promote_to_old_space( curr );
        } else if( curr->active == BLOCK_ACTIVE ) {
            // Active but not used, release block
            /* Clear the code address from its LUT entry, preserving only the
             * low flag bits (EMPTY/USED continuation marker) */
            *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03);
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_temp_cache;
        }
    } while(1);
    /* Claim the coalesced region and copy the block's header fields + code */
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    /* Trim excess back to a free block; wrap the ring pointer at the sentinel */
    xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_temp_cache_ptr->size == 0 ) {
        xlat_temp_cache_ptr = xlat_temp_cache;
    }

}
nkeynes@991
   409
#else 
nkeynes@991
   410
/**
 * Without a generational cache there is nowhere to promote to: evicting a
 * block from the new cache simply deletes it.
 *
 * Fix: the explicit *block->lut_entry = block->chain assignment that
 * preceded the call was redundant - xlat_delete_block() performs exactly
 * that LUT unhook itself.
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    xlat_delete_block(block);
}
nkeynes@991
   415
#endif
nkeynes@991
   416
nkeynes@991
   417
/**
 * Returns the next block in the new cache list that can be written to by the
 * translator. If the next block is active, it is evicted first.
 */
xlat_cache_block_t xlat_start_block( sh4addr_t address )
{
    /* Wrap to the front of the ring on hitting the zero-size sentinel */
    if( xlat_new_cache_ptr->size == 0 ) {
        xlat_new_cache_ptr = xlat_new_cache;
    }

    /* Evict the current occupant (without a generational cache, promotion
     * just deletes it) */
    if( xlat_new_cache_ptr->active ) {
        xlat_promote_to_temp_space( xlat_new_cache_ptr );
    }
    xlat_new_create_ptr = xlat_new_cache_ptr;
    xlat_new_create_ptr->active = 1;
    xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);

    /* Add the LUT entry for the block */
    void **p = xlat_get_lut_entry(address);
    void *entry = *p;
    if( IS_ENTRY_POINT(entry) ) {
        /* A live block already exists at this address: chain the new block
         * in front of it so the old version remains reachable */
        xlat_cache_block_t oldblock = XLAT_BLOCK_FOR_LUT_ENTRY(entry);
        assert( oldblock->active );
        xlat_new_create_ptr->chain = XLAT_CODE_ADDR(entry);
    } else {
        xlat_new_create_ptr->chain = NULL;
    }
    xlat_new_create_ptr->use_list = NULL;

    *p = &xlat_new_create_ptr->code;
    /* Preserve the continuation (delay-slot) marker in the entry's low bit */
    if( IS_ENTRY_CONTINUATION(entry) ) {
        *((uintptr_t *)p) |= (uintptr_t)XLAT_LUT_ENTRY_USED;
    }
    xlat_new_create_ptr->lut_entry = p;

    return xlat_new_create_ptr;
}
nkeynes@991
   454
nkeynes@991
   455
/**
 * Grow the block currently under construction to at least newSize bytes,
 * absorbing (and evicting, if live) the blocks that follow it in the new
 * cache. If the end-of-cache sentinel is reached mid-growth, the partial
 * block is relocated to the front of the cache so its code stays contiguous.
 *
 * @return the (possibly relocated) block under construction.
 */
xlat_cache_block_t xlat_extend_block( uint32_t newSize )
{
    /* Nothing may reference a block that is still being written */
    assert( xlat_new_create_ptr->use_list == NULL );
    while( xlat_new_create_ptr->size < newSize ) {
        if( xlat_new_cache_ptr->size == 0 ) {
            /* Migrate to the front of the cache to keep it contiguous */
            xlat_new_create_ptr->active = 0;
            sh4ptr_t olddata = xlat_new_create_ptr->code;
            int oldsize = xlat_new_create_ptr->size;
            int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
            void **lut_entry = xlat_new_create_ptr->lut_entry;
            void *chain = xlat_new_create_ptr->chain;
            int allocation = (int)-sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = xlat_new_cache;
            do {
                /* Evict any live blocks in the way */
                if( xlat_new_cache_ptr->active ) {
                    xlat_promote_to_temp_space( xlat_new_cache_ptr );
                }
                allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
                xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
            } while( allocation < size );
            /* Rebuild the block header at the front and move the code over */
            xlat_new_create_ptr = xlat_new_cache;
            xlat_new_create_ptr->active = 1;
            xlat_new_create_ptr->size = allocation;
            xlat_new_create_ptr->lut_entry = lut_entry;
            xlat_new_create_ptr->chain = chain;
            xlat_new_create_ptr->use_list = NULL;
            *lut_entry = &xlat_new_create_ptr->code;
            /* memmove: source and destination regions may overlap */
            memmove( xlat_new_create_ptr->code, olddata, oldsize );
        } else {
            /* Absorb the next block, evicting it first if it is live */
            if( xlat_new_cache_ptr->active ) {
                xlat_promote_to_temp_space( xlat_new_cache_ptr );
            }
            xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
        }
    }
    return xlat_new_create_ptr;

}
nkeynes@991
   495
nkeynes@1195
   496
/**
 * Finalize the block under construction: mark every instruction word in
 * (startpc, endpc) as a continuation of this block by setting the USED bit
 * in its LUT entry, then trim the block to its final code size.
 *
 * The entry for startpc itself is assumed to have been installed by
 * xlat_start_block().
 */
void xlat_commit_block( uint32_t destsize, sh4addr_t startpc, sh4addr_t endpc )
{
    void **entry = xlat_get_lut_entry(startpc+2);
    /* assume main entry has already been set at this point */

    for( sh4addr_t pc = startpc+2; pc < endpc; pc += 2 ) {
        /* Crossed into a new LUT page: fetch (and allocate if needed) it */
        if( XLAT_LUT_ENTRY(pc) == 0 )
            entry = xlat_get_lut_entry(pc);
        *((uintptr_t *)entry) |= (uintptr_t)XLAT_LUT_ENTRY_USED;
        entry++;
    }

    xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
}
nkeynes@991
   510
nkeynes@991
   511
/**
 * Walk a cache region verifying block-header sanity: the region must end in
 * the zero-size active sentinel, every header must carry a valid state and
 * size, and ptr must point at one of the walked blocks (or the sentinel).
 */
void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
{
    xlat_cache_block_t sentinel =
        (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));
    int seen_ptr = 0;

    assert( sentinel->active == 1 );
    assert( sentinel->size == 0 ); 
    for( ; cache < sentinel; cache = NEXT(cache) ) {
        assert( cache->active >= 0 && cache->active <= 2 );
        assert( cache->size >= 0 && cache->size < size );
        if( cache == ptr ) {
            seen_ptr = 1;
        }
    }
    assert( cache == sentinel );
    assert( seen_ptr == 1 || sentinel == ptr );
}
nkeynes@991
   530
nkeynes@1091
   531
/**
nkeynes@1175
   532
 * Perform a reverse lookup to determine the SH4 address corresponding to
nkeynes@1175
   533
 * the start of the code block containing ptr. This is _slow_ - it does a
nkeynes@1175
   534
 * linear scan of the lookup table to find this.
nkeynes@1175
   535
 *
nkeynes@1175
   536
 * If the pointer cannot be found in any live block, returns -1 (as this
nkeynes@1175
   537
 * is not a legal PC)
nkeynes@1175
   538
 */
nkeynes@1175
   539
sh4addr_t xlat_get_address( unsigned char *ptr )
nkeynes@1175
   540
{
nkeynes@1175
   541
    int i,j;
nkeynes@1175
   542
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
nkeynes@1175
   543
        void **page = xlat_lut[i];
nkeynes@1175
   544
        if( page != NULL ) {
nkeynes@1175
   545
            for( j=0; j<XLAT_LUT_PAGE_ENTRIES; j++ ) {
nkeynes@1175
   546
                void *entry = page[j];
nkeynes@1186
   547
                if( ((uintptr_t)entry) > (uintptr_t)XLAT_LUT_ENTRY_USED ) {
nkeynes@1195
   548
                    xlat_cache_block_t block = XLAT_BLOCK_FOR_LUT_ENTRY(entry);
nkeynes@1175
   549
                    if( ptr >= block->code && ptr < block->code + block->size) {
nkeynes@1175
   550
                        /* Found it */
nkeynes@1175
   551
                        return (i<<13) | (j<<1);
nkeynes@1175
   552
                    }
nkeynes@1175
   553
                }
nkeynes@1175
   554
            }
nkeynes@1175
   555
        }
nkeynes@1175
   556
    }
nkeynes@1175
   557
    return -1;
nkeynes@1175
   558
}
nkeynes@1175
   559
nkeynes@1175
   560
/**
nkeynes@1091
   561
 * Sanity check that the given pointer is at least contained in one of cache
nkeynes@1091
   562
 * regions, and has a sane-ish size. We don't do a full region walk atm.
nkeynes@1091
   563
 */
nkeynes@1091
   564
gboolean xlat_is_code_pointer( void *p )
nkeynes@1091
   565
{
nkeynes@1091
   566
    char *region;
nkeynes@1091
   567
    uintptr_t region_size;
nkeynes@1091
   568
nkeynes@1091
   569
    xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
nkeynes@1091
   570
    if( (((char *)block) - (char *)xlat_new_cache) < XLAT_NEW_CACHE_SIZE ) {
nkeynes@1091
   571
         /* Pointer is in new cache */
nkeynes@1091
   572
        region = (char *)xlat_new_cache;
nkeynes@1091
   573
        region_size = XLAT_NEW_CACHE_SIZE;
nkeynes@1091
   574
    }
nkeynes@1091
   575
#ifdef XLAT_GENERATIONAL_CACHE
nkeynes@1091
   576
    else if( (((char *)block) - (char *)xlat_temp_cache) < XLAT_TEMP_CACHE_SIZE ) {
nkeynes@1091
   577
         /* Pointer is in temp cache */
nkeynes@1091
   578
        region = (char *)xlat_temp_cache;
nkeynes@1091
   579
        region_size = XLAT_TEMP_CACHE_SIZE;
nkeynes@1091
   580
    } else if( (((char *)block) - (char *)xlat_odl_cache) < XLAT_OLD_CACHE_SIZE ) {
nkeynes@1091
   581
        /* Pointer is in old cache */
nkeynes@1091
   582
        region = (char *)xlat_old_cache;
nkeynes@1091
   583
        region_size = XLAT_OLD_CACHE_SIZE;
nkeynes@1091
   584
    }
nkeynes@1091
   585
#endif
nkeynes@1091
   586
    else {
nkeynes@1091
   587
        /* Not a valid cache pointer */
nkeynes@1091
   588
        return FALSE;
nkeynes@1091
   589
    }
nkeynes@1091
   590
nkeynes@1091
   591
    /* Make sure the whole block is in the region */
nkeynes@1091
   592
    if( (((char *)p) - region) >= region_size ||
nkeynes@1091
   593
        (((char *)(NEXT(block))) - region) >= region_size )
nkeynes@1091
   594
        return FALSE;
nkeynes@1091
   595
    return TRUE;
nkeynes@1091
   596
}
nkeynes@1091
   597
nkeynes@991
   598
/**
 * Run the cache integrity checker over every compiled-in cache region:
 * always the new cache, plus the temp and old caches when the
 * generational cache is enabled at build time.
 */
void xlat_check_integrity( )
{
    xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
#endif
}
nkeynes@991
   606
nkeynes@1182
   607
unsigned int xlat_get_active_block_count()
nkeynes@1182
   608
{
nkeynes@1182
   609
    unsigned int count = 0;
nkeynes@1182
   610
    xlat_cache_block_t ptr = xlat_new_cache;
nkeynes@1182
   611
    while( ptr->size != 0 ) {
nkeynes@1182
   612
        if( ptr->active != 0 ) {
nkeynes@1182
   613
            count++;
nkeynes@1182
   614
        }
nkeynes@1182
   615
        ptr = NEXT(ptr);
nkeynes@1182
   616
    }
nkeynes@1182
   617
    return count;
nkeynes@1182
   618
}
nkeynes@1182
   619
nkeynes@1188
   620
unsigned int xlat_get_active_blocks( struct xlat_block_ref *blocks, unsigned int size )
nkeynes@1182
   621
{
nkeynes@1182
   622
    unsigned int count = 0;
nkeynes@1182
   623
    xlat_cache_block_t ptr = xlat_new_cache;
nkeynes@1182
   624
    while( ptr->size != 0 ) {
nkeynes@1182
   625
        if( ptr->active != 0 ) {
nkeynes@1182
   626
            blocks[count].block = ptr;
nkeynes@1188
   627
            blocks[count].pc = 0;
nkeynes@1182
   628
            count++;
nkeynes@1182
   629
        }
nkeynes@1182
   630
        if( count >= size )
nkeynes@1182
   631
            break;
nkeynes@1182
   632
        ptr = NEXT(ptr);
nkeynes@1182
   633
    }
nkeynes@1182
   634
    return count;
nkeynes@1182
   635
}
nkeynes@1182
   636
nkeynes@1189
   637
/**
 * Fill in the .pc field of each entry in blocks[] by scanning the whole
 * translation lookup table: for every LUT entry with translated code, the
 * corresponding SH4 address is assigned to any blocks[] entry whose block
 * matches, then the block's chain is followed so chained translations of
 * the same PC are labelled too.
 * @param blocks array previously populated by xlat_get_active_blocks()
 * @param size   number of valid entries in blocks[]
 */
static void xlat_get_block_pcs( struct xlat_block_ref *blocks, unsigned int size )
{
    unsigned i;
    for( i=0; i<XLAT_LUT_PAGES;i ++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( unsigned j=0; j < XLAT_LUT_PAGE_ENTRIES; j++ ) {
                void *code = XLAT_CODE_ADDR(page[j]);
                if( code != NULL ) {
                    xlat_cache_block_t ptr = XLAT_BLOCK_FOR_CODE(code);
                    /* Reconstruct the SH4 PC from the LUT page/entry indices */
                    sh4addr_t pc = XLAT_ADDR_FROM_ENTRY(i,j);
                    for( unsigned k=0; k<size; k++ ) {
                        if( blocks[k].block == ptr ) {
                            blocks[k].pc = pc;
                            /* Follow the chain to the next translation of
                             * this PC, and restart the scan over blocks[] */
                            ptr = ptr->chain;
                            if( ptr == NULL )
                                break;
                            else {
                                ptr = XLAT_BLOCK_FOR_CODE(ptr);
                                /* NOTE(review): k = 0 followed by the loop's
                                 * k++ resumes the scan at index 1, so a
                                 * chained block stored at blocks[0] is never
                                 * re-matched — looks like an off-by-one;
                                 * confirm intent before changing. */
                                k = 0;
                            }
                        }
                    }
                }
            }
        }
    }
}
nkeynes@1182
   665
nkeynes@1182
   666
static int xlat_compare_active_field( const void *a, const void *b )
nkeynes@1182
   667
{
nkeynes@1188
   668
    const struct xlat_block_ref *ptra = (const struct xlat_block_ref *)a;
nkeynes@1188
   669
    const struct xlat_block_ref *ptrb = (const struct xlat_block_ref *)b;
nkeynes@1182
   670
    return ptrb->block->active - ptra->block->active;
nkeynes@1182
   671
}
nkeynes@1182
   672
nkeynes@1188
   673
/**
 * Return the topN most active blocks from the new cache, sorted by
 * descending activity, with their SH4 PCs resolved.
 * @param outblocks output array with capacity for at least topN entries
 * @param topN      maximum number of entries to return
 * @return the number of entries written (min(topN, active block count)).
 */
unsigned int xlat_get_cache_blocks_by_activity( xlat_block_ref_t outblocks, size_t topN )
{
    /* unsigned to match xlat_get_active_block_count() and avoid the
     * signed/unsigned comparison with topN below (removed an unused
     * 'int i' local while here) */
    unsigned int count = xlat_get_active_block_count();
    if( count == 0 ) {
        /* A zero-length VLA is undefined behavior — bail out early when
         * there are no active blocks */
        return 0;
    }

    struct xlat_block_ref blocks[count];
    xlat_get_active_blocks(blocks, count);
    xlat_get_block_pcs(blocks,count);
    qsort(blocks, count, sizeof(struct xlat_block_ref), xlat_compare_active_field);

    if( topN > count )
        topN = count;
    memcpy(outblocks, blocks, topN*sizeof(struct xlat_block_ref));
    return topN;
}
.