filename    src/sh4/xltcache.c
changeset   986:5090104b0963
prev        953:f4a156508ad1
author      nkeynes
date        Tue Feb 24 11:50:17 2009 +0000
permissions -rw-r--r--
last change Fix minor warnings. Add explicit casts to mmap() result for platforms where it's not void *...

/**
 * $Id$
 *
 * Translation cache management. This part is architecture independent.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
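
/*
 * Rough usage sketch (inferred from the entry points below, not a normative
 * API description):
 *
 *   xlat_cache_block_t block = xlat_start_block( pc );  // claim space + LUT entry
 *   // ... emit native code into block->code, calling
 *   // xlat_extend_block( newSize ) if the block needs to grow ...
 *   xlat_commit_block( destsize, srcsize );             // trim + mark range used
 *
 *   void *code = xlat_get_code( pc );                   // look up translated code
 *   xlat_invalidate_block( addr, size );                // drop translations on write
 */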

#include <sys/types.h>
#include <sys/mman.h>
#include <assert.h>

#include "dreamcast.h"
#include "sh4/sh4core.h"
#include "sh4/xltcache.h"
#include "x86dasm/x86dasm.h"

#define XLAT_LUT_PAGE_BITS 12
#define XLAT_LUT_TOTAL_BITS 28
#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)

#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))
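
/*
 * Worked example of the lookup-table split above: each LUT entry covers one
 * 16-bit instruction word, so a page of 4096 entries covers 8KB of SH4
 * address space. For address 0x8C001000:
 *   XLAT_LUT_PAGE(0x8C001000)  == (0x8C001000 >> 13) & 0xFFFF == 0x6000
 *   XLAT_LUT_ENTRY(0x8C001000) == (0x8C001000 & 0x1FFE) >> 1  == 0x800
 * so its entry lives at xlat_lut[0x6000][0x800].
 */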

#define XLAT_LUT_ENTRY_EMPTY (void *)0
#define XLAT_LUT_ENTRY_USED  (void *)1

#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
#define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
#define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)

#define MIN_BLOCK_SIZE 32
#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)

#define BLOCK_INACTIVE 0
#define BLOCK_ACTIVE 1
#define BLOCK_USED 2
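
/*
 * Cache layout: each cache region is a contiguous run of blocks, where every
 * block is a struct xlat_cache_block header immediately followed by
 * block->size bytes of generated code. NEXT() steps from one header to the
 * next, and a sentinel block with size == 0 and active == 1 marks the end of
 * the region (see xlat_flush_cache and the integrity checks below).
 */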

xlat_cache_block_t xlat_new_cache;
xlat_cache_block_t xlat_new_cache_ptr;
xlat_cache_block_t xlat_new_create_ptr;

#ifdef XLAT_GENERATIONAL_CACHE
xlat_cache_block_t xlat_temp_cache;
xlat_cache_block_t xlat_temp_cache_ptr;
xlat_cache_block_t xlat_old_cache;
xlat_cache_block_t xlat_old_cache_ptr;
#endif

static void **xlat_lut[XLAT_LUT_PAGES];
static gboolean xlat_initialized = FALSE;
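
/**
 * Allocate the translation cache region(s) on first call and clear the
 * lookup table; every call ends with a full cache flush.
 */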
void xlat_cache_init(void)
{
    if( !xlat_initialized ) {
        xlat_initialized = TRUE;
        xlat_new_cache = (xlat_cache_block_t)mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_new_cache_ptr = xlat_new_cache;
        xlat_new_create_ptr = xlat_new_cache;
#ifdef XLAT_GENERATIONAL_CACHE
        xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_temp_cache_ptr = xlat_temp_cache;
        xlat_old_cache_ptr = xlat_old_cache;
#endif
//        xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
//                MAP_PRIVATE|MAP_ANON, -1, 0);
        memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
    }
    xlat_flush_cache();
}

/**
 * Reset the cache structure to its default state
 */
void xlat_flush_cache() 
{
    xlat_cache_block_t tmp;
    int i;
    xlat_new_cache_ptr = xlat_new_cache;
    xlat_new_cache_ptr->active = 0;
    xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_new_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_temp_cache_ptr->active = 0;
    xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_temp_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_old_cache_ptr->active = 0;
    xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_old_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#endif
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        if( xlat_lut[i] != NULL ) {
            memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
        }
    }
}
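
/**
 * Deactivate every translated block referenced from the given LUT page and
 * clear all of the page's entries.
 */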
static void xlat_flush_page_by_lut( void **page )
{
    int i;
    for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
        if( IS_ENTRY_POINT(page[i]) ) {
            XLAT_BLOCK_FOR_CODE(page[i])->active = 0;
        }
        page[i] = NULL;
    }
}
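
/**
 * Invalidate any translated code covering the 16-bit word at the given SH4
 * address. If the word is covered, the whole containing LUT page is flushed.
 */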
void FASTCALL xlat_invalidate_word( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( page[entry] != NULL ) {
            xlat_flush_page_by_lut(page);
        }
    }
}
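
/**
 * Invalidate any translated code covering the 32-bit longword at the given
 * SH4 address.
 */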
void FASTCALL xlat_invalidate_long( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( *(uint64_t *)&page[entry] != 0 ) {
            xlat_flush_page_by_lut(page);
        }
    }
}
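
/**
 * Invalidate all translated code overlapping the given range of SH4 address
 * space (size in bytes). Each LUT page touched by the range is flushed if it
 * contains any translated code.
 */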
void FASTCALL xlat_invalidate_block( sh4addr_t address, size_t size )
{
    int i;
    int entry_count = size >> 1; // words;
    uint32_t page_no = XLAT_LUT_PAGE(address);
    int entry = XLAT_LUT_ENTRY(address);
    do {
        void **page = xlat_lut[page_no];
        int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
        if( entry_count < page_entries ) {
            page_entries = entry_count;
        }
        if( page != NULL ) {
            if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
                /* Overwriting the entire page anyway */
                xlat_flush_page_by_lut(page);
            } else {
                for( i=entry; i<entry+page_entries; i++ ) {
                    if( page[i] != NULL ) {
                        xlat_flush_page_by_lut(page);
                        break;
                    }
                }
            }
        }
        page_no ++;
        entry_count -= page_entries;
        entry = 0;
    } while( entry_count > 0 );
}
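
/**
 * Flush all translated code in the LUT page containing the given address.
 */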
void FASTCALL xlat_flush_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        xlat_flush_page_by_lut(page);
    }
}
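
/**
 * Return the translated entry point for the given SH4 address, or NULL if
 * none exists. The low two bits of the LUT entry are flag bits (see
 * XLAT_LUT_ENTRY_USED) and are masked off here.
 */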
void * FASTCALL xlat_get_code( sh4addr_t address )
{
    void *result = NULL;
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        result = (void *)(((uintptr_t)(page[XLAT_LUT_ENTRY(address)])) & (~((uintptr_t)0x03)));
    }
    return result;
}
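
/**
 * Locate the recovery record covering the given native PC within the block's
 * recovery table (the last record whose xlat_offset lies below the PC's
 * offset into the block). Returns NULL when code is NULL.
 */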
xlat_recovery_record_t xlat_get_pre_recovery( void *code, void *native_pc )
{
    if( code != NULL ) {
        uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
        xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code);
        uint32_t count = block->recover_table_size;
        xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
        uint32_t posn;
        for( posn = 1; posn < count; posn++ ) {
            if( records[posn].xlat_offset >= pc_offset ) {
                return &records[posn-1];
            }
        }
        return &records[count-1];
    }
    return NULL;
}
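
/**
 * Return a pointer to the LUT entry for the given SH4 address, allocating and
 * zeroing the containing LUT page if it does not already exist.
 */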
void ** FASTCALL xlat_get_lut_entry( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];

    /* Add the LUT entry for the block */
    if( page == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] = page =
            (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( page, 0, XLAT_LUT_PAGE_SIZE );
    }

    return &page[XLAT_LUT_ENTRY(address)];
}
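
/**
 * Return the total allocated size of the given translated block. The block
 * header sits immediately before the code it describes.
 */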
uint32_t FASTCALL xlat_get_block_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    return xlt->size;
}
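
/**
 * Return the size of the generated code in the given block, excluding the
 * recovery table appended after the code (when one is present).
 */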
uint32_t FASTCALL xlat_get_code_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    if( xlt->recover_table_offset == 0 ) {
        return xlt->size;
    } else {
        return xlt->recover_table_offset;
    }
}

/**
 * Cut the specified block so that it has the given size, with the remaining data
 * forming a new free block. If the free block would be less than the minimum size,
 * the cut is not performed.
 * @return the next block after the (possibly cut) block.
 */
static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
{
    cutsize = (cutsize + 3) & 0xFFFFFFFC; // force word alignment
    assert( cutsize <= block->size );
    if( block->size > cutsize + MIN_TOTAL_SIZE ) {
        int oldsize = block->size;
        block->size = cutsize;
        xlat_cache_block_t next = NEXT(block);
        next->active = 0;
        next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
        return next;
    } else {
        return NEXT(block);
    }
}

#ifdef XLAT_GENERATIONAL_CACHE
/**
 * Promote a block in temp space (or elsewhere for that matter) to old space.
 *
 * @param block to promote.
 */
static void xlat_promote_to_old_space( xlat_cache_block_t block )
{
    int allocation = (int)-sizeof(struct xlat_cache_block);
    int size = block->size;
    xlat_cache_block_t curr = xlat_old_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_old_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_old_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_old_cache_ptr->size == 0 ) {
        xlat_old_cache_ptr = xlat_old_cache;
    }
}

/**
 * Similar to the method above, but promotes a block to temp space.
 * TODO: Try to combine these - they're nearly identical
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    int size = block->size;
    int allocation = (int)-sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = xlat_temp_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        if( curr->active == BLOCK_USED ) {
            xlat_promote_to_old_space( curr );
        } else if( curr->active == BLOCK_ACTIVE ) {
            // Active but not used, release block
            *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03);
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_temp_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_temp_cache_ptr->size == 0 ) {
        xlat_temp_cache_ptr = xlat_temp_cache;
    }
}
#else
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    *block->lut_entry = 0;
}
#endif

/**
 * Returns the next block in the new cache list that can be written to by the
 * translator. If the next block is active, it is evicted first.
 */
xlat_cache_block_t xlat_start_block( sh4addr_t address )
{
    if( xlat_new_cache_ptr->size == 0 ) {
        xlat_new_cache_ptr = xlat_new_cache;
    }

    if( xlat_new_cache_ptr->active ) {
        xlat_promote_to_temp_space( xlat_new_cache_ptr );
    }
    xlat_new_create_ptr = xlat_new_cache_ptr;
    xlat_new_create_ptr->active = 1;
    xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);

    /* Add the LUT entry for the block */
    if( xlat_lut[XLAT_LUT_PAGE(address)] == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] =
            (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( xlat_lut[XLAT_LUT_PAGE(address)], 0, XLAT_LUT_PAGE_SIZE );
    }

    if( IS_ENTRY_POINT(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]) ) {
        xlat_cache_block_t oldblock = XLAT_BLOCK_FOR_CODE(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]);
        oldblock->active = 0;
    }

    xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)] =
        &xlat_new_create_ptr->code;
    xlat_new_create_ptr->lut_entry = xlat_lut[XLAT_LUT_PAGE(address)] + XLAT_LUT_ENTRY(address);

    return xlat_new_create_ptr;
}
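
/**
 * Grow the block currently under construction until it can hold at least
 * newSize bytes, evicting the blocks that follow it. When the end of the
 * cache is reached, the partial block is migrated back to the front of the
 * cache so that it stays contiguous.
 */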
xlat_cache_block_t xlat_extend_block( uint32_t newSize )
{
    while( xlat_new_create_ptr->size < newSize ) {
        if( xlat_new_cache_ptr->size == 0 ) {
            /* Migrate to the front of the cache to keep it contiguous */
            xlat_new_create_ptr->active = 0;
            sh4ptr_t olddata = xlat_new_create_ptr->code;
            int oldsize = xlat_new_create_ptr->size;
            int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
            void **lut_entry = xlat_new_create_ptr->lut_entry;
            int allocation = (int)-sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = xlat_new_cache;
            do {
                if( xlat_new_cache_ptr->active ) {
                    xlat_promote_to_temp_space( xlat_new_cache_ptr );
                }
                allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
                xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
            } while( allocation < size );
            xlat_new_create_ptr = xlat_new_cache;
            xlat_new_create_ptr->active = 1;
            xlat_new_create_ptr->size = allocation;
            xlat_new_create_ptr->lut_entry = lut_entry;
            *lut_entry = &xlat_new_create_ptr->code;
            memmove( xlat_new_create_ptr->code, olddata, oldsize );
        } else {
            if( xlat_new_cache_ptr->active ) {
                xlat_promote_to_temp_space( xlat_new_cache_ptr );
            }
            xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
        }
    }
    return xlat_new_create_ptr;
}
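
/**
 * Finalise the block currently under construction: mark the LUT entries
 * spanned by the translated source range as used, then cut the block down to
 * destsize, leaving the remainder as free space.
 */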
void xlat_commit_block( uint32_t destsize, uint32_t srcsize )
{
    void **ptr = xlat_new_create_ptr->lut_entry;
    void **endptr = ptr + (srcsize>>2);
    while( ptr < endptr ) {
        if( *ptr == NULL ) {
            *ptr = XLAT_LUT_ENTRY_USED;
        }
        ptr++;
    }

    xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
}

void xlat_delete_block( xlat_cache_block_t block )
{
    block->active = 0;
    *block->lut_entry = NULL;
}
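
/**
 * Walk the given cache region from its start to the sentinel tail block,
 * asserting the structural invariants of every block and that the region's
 * allocation pointer lies within it.
 */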
void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
{
    int foundptr = 0;
    xlat_cache_block_t tail =
        (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));

    assert( tail->active == 1 );
    assert( tail->size == 0 );
    while( cache < tail ) {
        assert( cache->active >= 0 && cache->active <= 2 );
        assert( cache->size >= 0 && cache->size < size );
        if( cache == ptr ) {
            foundptr = 1;
        }
        cache = NEXT(cache);
    }
    assert( cache == tail );
    assert( foundptr == 1 || tail == ptr );
}

void xlat_check_integrity( )
{
    xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
#endif
}