nkeynes@359 | 1 | /**
|
nkeynes@586 | 2 | * $Id$
|
nkeynes@359 | 3 | *
|
nkeynes@359 | 4 | * Translation cache management. This part is architecture independent.
|
nkeynes@359 | 5 | *
|
nkeynes@359 | 6 | * Copyright (c) 2005 Nathan Keynes.
|
nkeynes@359 | 7 | *
|
nkeynes@359 | 8 | * This program is free software; you can redistribute it and/or modify
|
nkeynes@359 | 9 | * it under the terms of the GNU General Public License as published by
|
nkeynes@359 | 10 | * the Free Software Foundation; either version 2 of the License, or
|
nkeynes@359 | 11 | * (at your option) any later version.
|
nkeynes@359 | 12 | *
|
nkeynes@359 | 13 | * This program is distributed in the hope that it will be useful,
|
nkeynes@359 | 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
nkeynes@359 | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
nkeynes@359 | 16 | * GNU General Public License for more details.
|
nkeynes@359 | 17 | */
|
nkeynes@736 | 18 |
|
#include <sys/types.h>
#include <sys/mman.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "dreamcast.h"
#include "sh4/sh4core.h"
#include "sh4/xltcache.h"
#include "x86dasm/x86dasm.h"
|
nkeynes@428 | 27 |
|
nkeynes@359 | 28 | #define XLAT_LUT_PAGE_BITS 12
|
nkeynes@359 | 29 | #define XLAT_LUT_TOTAL_BITS 28
|
nkeynes@359 | 30 | #define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
|
nkeynes@359 | 31 | #define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)
|
nkeynes@359 | 32 |
|
nkeynes@359 | 33 | #define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
|
nkeynes@359 | 34 | #define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
|
nkeynes@359 | 35 | #define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))
|
nkeynes@359 | 36 |
|
nkeynes@359 | 37 | #define XLAT_LUT_ENTRY_EMPTY (void *)0
|
nkeynes@359 | 38 | #define XLAT_LUT_ENTRY_USED (void *)1
|
nkeynes@359 | 39 |
|
nkeynes@359 | 40 | #define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
|
nkeynes@359 | 41 | #define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
|
nkeynes@359 | 42 | #define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)
|
nkeynes@359 | 43 |
|
nkeynes@359 | 44 | #define MIN_BLOCK_SIZE 32
|
nkeynes@359 | 45 | #define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)
|
nkeynes@359 | 46 |
|
nkeynes@359 | 47 | #define BLOCK_INACTIVE 0
|
nkeynes@359 | 48 | #define BLOCK_ACTIVE 1
|
nkeynes@359 | 49 | #define BLOCK_USED 2
|
nkeynes@359 | 50 |
|
nkeynes@359 | 51 | xlat_cache_block_t xlat_new_cache;
|
nkeynes@359 | 52 | xlat_cache_block_t xlat_new_cache_ptr;
|
nkeynes@359 | 53 | xlat_cache_block_t xlat_new_create_ptr;
|
nkeynes@359 | 54 | xlat_cache_block_t xlat_temp_cache;
|
nkeynes@359 | 55 | xlat_cache_block_t xlat_temp_cache_ptr;
|
nkeynes@359 | 56 | xlat_cache_block_t xlat_old_cache;
|
nkeynes@359 | 57 | xlat_cache_block_t xlat_old_cache_ptr;
|
nkeynes@359 | 58 | static void ***xlat_lut;
|
nkeynes@376 | 59 | static gboolean xlat_initialized = FALSE;
|
nkeynes@359 | 60 |
|
nkeynes@422 | 61 | void xlat_cache_init(void)
|
nkeynes@359 | 62 | {
|
nkeynes@376 | 63 | if( !xlat_initialized ) {
|
nkeynes@736 | 64 | xlat_initialized = TRUE;
|
nkeynes@736 | 65 | xlat_new_cache = mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
|
nkeynes@736 | 66 | MAP_PRIVATE|MAP_ANON, -1, 0 );
|
nkeynes@736 | 67 | xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
|
nkeynes@736 | 68 | MAP_PRIVATE|MAP_ANON, -1, 0 );
|
nkeynes@736 | 69 | xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
|
nkeynes@736 | 70 | MAP_PRIVATE|MAP_ANON, -1, 0 );
|
nkeynes@736 | 71 | xlat_new_cache_ptr = xlat_new_cache;
|
nkeynes@736 | 72 | xlat_temp_cache_ptr = xlat_temp_cache;
|
nkeynes@736 | 73 | xlat_old_cache_ptr = xlat_old_cache;
|
nkeynes@736 | 74 | xlat_new_create_ptr = xlat_new_cache;
|
nkeynes@736 | 75 |
|
nkeynes@736 | 76 | xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
|
nkeynes@736 | 77 | MAP_PRIVATE|MAP_ANON, -1, 0);
|
nkeynes@736 | 78 | memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
|
nkeynes@376 | 79 | }
|
nkeynes@359 | 80 | xlat_flush_cache();
|
nkeynes@359 | 81 | }
|
nkeynes@359 | 82 |
|
nkeynes@400 | 83 | void xlat_print_free( FILE *out )
|
nkeynes@400 | 84 | {
|
nkeynes@400 | 85 | fprintf( out, "New space: %d\nTemp space: %d\nOld space: %d\n",
|
nkeynes@736 | 86 | xlat_new_cache_ptr->size, xlat_temp_cache_ptr->size, xlat_old_cache_ptr->size );
|
nkeynes@400 | 87 | }
|
nkeynes@400 | 88 |
|
nkeynes@359 | 89 | /**
|
nkeynes@359 | 90 | * Reset the cache structure to its default state
|
nkeynes@359 | 91 | */
|
nkeynes@359 | 92 | void xlat_flush_cache()
|
nkeynes@359 | 93 | {
|
nkeynes@359 | 94 | xlat_cache_block_t tmp;
|
nkeynes@359 | 95 | int i;
|
nkeynes@359 | 96 | xlat_new_cache_ptr = xlat_new_cache;
|
nkeynes@359 | 97 | xlat_new_cache_ptr->active = 0;
|
nkeynes@359 | 98 | xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
|
nkeynes@359 | 99 | tmp = NEXT(xlat_new_cache_ptr);
|
nkeynes@359 | 100 | tmp->active = 1;
|
nkeynes@359 | 101 | tmp->size = 0;
|
nkeynes@359 | 102 | xlat_temp_cache_ptr = xlat_temp_cache;
|
nkeynes@359 | 103 | xlat_temp_cache_ptr->active = 0;
|
nkeynes@359 | 104 | xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
|
nkeynes@359 | 105 | tmp = NEXT(xlat_temp_cache_ptr);
|
nkeynes@359 | 106 | tmp->active = 1;
|
nkeynes@359 | 107 | tmp->size = 0;
|
nkeynes@359 | 108 | xlat_old_cache_ptr = xlat_old_cache;
|
nkeynes@359 | 109 | xlat_old_cache_ptr->active = 0;
|
nkeynes@359 | 110 | xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
|
nkeynes@359 | 111 | tmp = NEXT(xlat_old_cache_ptr);
|
nkeynes@359 | 112 | tmp->active = 1;
|
nkeynes@359 | 113 | tmp->size = 0;
|
nkeynes@359 | 114 | for( i=0; i<XLAT_LUT_PAGES; i++ ) {
|
nkeynes@736 | 115 | if( xlat_lut[i] != NULL ) {
|
nkeynes@736 | 116 | memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
|
nkeynes@736 | 117 | }
|
nkeynes@359 | 118 | }
|
nkeynes@359 | 119 | }
|
nkeynes@359 | 120 |
|
nkeynes@400 | 121 | static void xlat_flush_page_by_lut( void **page )
|
nkeynes@359 | 122 | {
|
nkeynes@359 | 123 | int i;
|
nkeynes@359 | 124 | for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
|
nkeynes@736 | 125 | if( IS_ENTRY_POINT(page[i]) ) {
|
nkeynes@901 | 126 | XLAT_BLOCK_FOR_CODE(page[i])->active = 0;
|
nkeynes@736 | 127 | }
|
nkeynes@736 | 128 | page[i] = NULL;
|
nkeynes@359 | 129 | }
|
nkeynes@359 | 130 | }
|
nkeynes@359 | 131 |
|
nkeynes@905 | 132 | void FASTCALL xlat_invalidate_word( sh4addr_t addr )
|
nkeynes@400 | 133 | {
|
nkeynes@400 | 134 | if( xlat_lut ) {
|
nkeynes@736 | 135 | void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
|
nkeynes@736 | 136 | if( page != NULL ) {
|
nkeynes@736 | 137 | int entry = XLAT_LUT_ENTRY(addr);
|
nkeynes@736 | 138 | if( page[entry] != NULL ) {
|
nkeynes@736 | 139 | xlat_flush_page_by_lut(page);
|
nkeynes@736 | 140 | }
|
nkeynes@736 | 141 | }
|
nkeynes@400 | 142 | }
|
nkeynes@400 | 143 | }
|
nkeynes@400 | 144 |
|
nkeynes@905 | 145 | void FASTCALL xlat_invalidate_long( sh4addr_t addr )
|
nkeynes@400 | 146 | {
|
nkeynes@400 | 147 | if( xlat_lut ) {
|
nkeynes@736 | 148 | void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
|
nkeynes@736 | 149 | if( page != NULL ) {
|
nkeynes@736 | 150 | int entry = XLAT_LUT_ENTRY(addr);
|
nkeynes@736 | 151 | if( page[entry] != NULL || page[entry+1] != NULL ) {
|
nkeynes@736 | 152 | xlat_flush_page_by_lut(page);
|
nkeynes@736 | 153 | }
|
nkeynes@736 | 154 | }
|
nkeynes@400 | 155 | }
|
nkeynes@400 | 156 | }
|
nkeynes@400 | 157 |
|
nkeynes@905 | 158 | void FASTCALL xlat_invalidate_block( sh4addr_t address, size_t size )
|
nkeynes@400 | 159 | {
|
nkeynes@400 | 160 | int i;
|
nkeynes@400 | 161 | int entry_count = size >> 1; // words;
|
nkeynes@400 | 162 | uint32_t page_no = XLAT_LUT_PAGE(address);
|
nkeynes@400 | 163 | int entry = XLAT_LUT_ENTRY(address);
|
nkeynes@400 | 164 | if( xlat_lut ) {
|
nkeynes@736 | 165 | do {
|
nkeynes@736 | 166 | void **page = xlat_lut[page_no];
|
nkeynes@736 | 167 | int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
|
nkeynes@736 | 168 | if( entry_count < page_entries ) {
|
nkeynes@736 | 169 | page_entries = entry_count;
|
nkeynes@736 | 170 | }
|
nkeynes@736 | 171 | if( page != NULL ) {
|
nkeynes@736 | 172 | if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
|
nkeynes@736 | 173 | /* Overwriting the entire page anyway */
|
nkeynes@736 | 174 | xlat_flush_page_by_lut(page);
|
nkeynes@736 | 175 | } else {
|
nkeynes@736 | 176 | for( i=entry; i<entry+page_entries; i++ ) {
|
nkeynes@736 | 177 | if( page[i] != NULL ) {
|
nkeynes@736 | 178 | xlat_flush_page_by_lut(page);
|
nkeynes@736 | 179 | break;
|
nkeynes@736 | 180 | }
|
nkeynes@736 | 181 | }
|
nkeynes@736 | 182 | }
|
nkeynes@736 | 183 | entry_count -= page_entries;
|
nkeynes@736 | 184 | }
|
nkeynes@736 | 185 | page_no ++;
|
nkeynes@736 | 186 | entry_count -= page_entries;
|
nkeynes@736 | 187 | entry = 0;
|
nkeynes@736 | 188 | } while( entry_count > 0 );
|
nkeynes@400 | 189 | }
|
nkeynes@400 | 190 | }
|
nkeynes@400 | 191 |
|
nkeynes@905 | 192 | void FASTCALL xlat_flush_page( sh4addr_t address )
|
nkeynes@400 | 193 | {
|
nkeynes@400 | 194 | void **page = xlat_lut[XLAT_LUT_PAGE(address)];
|
nkeynes@400 | 195 | if( page != NULL ) {
|
nkeynes@736 | 196 | xlat_flush_page_by_lut(page);
|
nkeynes@400 | 197 | }
|
nkeynes@400 | 198 | }
|
nkeynes@400 | 199 |
|
nkeynes@905 | 200 | void * FASTCALL xlat_get_code( sh4addr_t address )
|
nkeynes@359 | 201 | {
|
nkeynes@410 | 202 | void *result = NULL;
|
nkeynes@359 | 203 | void **page = xlat_lut[XLAT_LUT_PAGE(address)];
|
nkeynes@407 | 204 | if( page != NULL ) {
|
nkeynes@736 | 205 | result = (void *)(((uintptr_t)(page[XLAT_LUT_ENTRY(address)])) & (~((uintptr_t)0x03)));
|
nkeynes@407 | 206 | }
|
nkeynes@407 | 207 | return result;
|
nkeynes@407 | 208 | }
|
nkeynes@407 | 209 |
|
nkeynes@809 | 210 | xlat_recovery_record_t xlat_get_post_recovery( void *code, void *native_pc, gboolean with_terminal )
|
nkeynes@586 | 211 | {
|
nkeynes@586 | 212 | if( code != NULL ) {
|
nkeynes@736 | 213 | uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
|
nkeynes@901 | 214 | xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code);
|
nkeynes@736 | 215 | uint32_t count = block->recover_table_size;
|
nkeynes@736 | 216 | xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
|
nkeynes@736 | 217 | uint32_t posn;
|
nkeynes@809 | 218 | if( count > 0 && !with_terminal )
|
nkeynes@809 | 219 | count--;
|
nkeynes@809 | 220 | if( records[count-1].xlat_offset < pc_offset ) {
|
nkeynes@809 | 221 | return NULL;
|
nkeynes@736 | 222 | }
|
nkeynes@809 | 223 | for( posn=count-1; posn > 0; posn-- ) {
|
nkeynes@809 | 224 | if( records[posn-1].xlat_offset < pc_offset ) {
|
nkeynes@809 | 225 | return &records[posn];
|
nkeynes@809 | 226 | }
|
nkeynes@809 | 227 | }
|
nkeynes@809 | 228 | return &records[0]; // shouldn't happen
|
nkeynes@586 | 229 | }
|
nkeynes@586 | 230 | return NULL;
|
nkeynes@586 | 231 | }
|
nkeynes@586 | 232 |
|
nkeynes@809 | 233 | xlat_recovery_record_t xlat_get_pre_recovery( void *code, void *native_pc )
|
nkeynes@809 | 234 | {
|
nkeynes@809 | 235 | if( code != NULL ) {
|
nkeynes@809 | 236 | uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
|
nkeynes@901 | 237 | xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code);
|
nkeynes@809 | 238 | uint32_t count = block->recover_table_size;
|
nkeynes@809 | 239 | xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
|
nkeynes@809 | 240 | uint32_t posn;
|
nkeynes@809 | 241 | for( posn = 1; posn < count; posn++ ) {
|
nkeynes@809 | 242 | if( records[posn].xlat_offset >= pc_offset ) {
|
nkeynes@809 | 243 | return &records[posn-1];
|
nkeynes@809 | 244 | }
|
nkeynes@809 | 245 | }
|
nkeynes@809 | 246 | return &records[count-1];
|
nkeynes@809 | 247 | }
|
nkeynes@809 | 248 | return NULL;
|
nkeynes@809 | 249 | }
|
nkeynes@809 | 250 |
|
nkeynes@905 | 251 | void ** FASTCALL xlat_get_lut_entry( sh4addr_t address )
|
nkeynes@407 | 252 | {
|
nkeynes@407 | 253 | void **page = xlat_lut[XLAT_LUT_PAGE(address)];
|
nkeynes@407 | 254 |
|
nkeynes@407 | 255 | /* Add the LUT entry for the block */
|
nkeynes@359 | 256 | if( page == NULL ) {
|
nkeynes@736 | 257 | xlat_lut[XLAT_LUT_PAGE(address)] = page =
|
nkeynes@736 | 258 | mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
|
nkeynes@736 | 259 | MAP_PRIVATE|MAP_ANON, -1, 0 );
|
nkeynes@736 | 260 | memset( page, 0, XLAT_LUT_PAGE_SIZE );
|
nkeynes@359 | 261 | }
|
nkeynes@736 | 262 |
|
nkeynes@407 | 263 | return &page[XLAT_LUT_ENTRY(address)];
|
nkeynes@359 | 264 | }
|
nkeynes@359 | 265 |
|
nkeynes@407 | 266 |
|
nkeynes@407 | 267 |
|
nkeynes@905 | 268 | uint32_t FASTCALL xlat_get_block_size( void *block )
|
nkeynes@366 | 269 | {
|
nkeynes@366 | 270 | xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
|
nkeynes@366 | 271 | return xlt->size;
|
nkeynes@366 | 272 | }
|
nkeynes@366 | 273 |
|
nkeynes@905 | 274 | uint32_t FASTCALL xlat_get_code_size( void *block )
|
nkeynes@586 | 275 | {
|
nkeynes@586 | 276 | xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
|
nkeynes@592 | 277 | if( xlt->recover_table_offset == 0 ) {
|
nkeynes@736 | 278 | return xlt->size;
|
nkeynes@586 | 279 | } else {
|
nkeynes@736 | 280 | return xlt->recover_table_offset;
|
nkeynes@586 | 281 | }
|
nkeynes@586 | 282 | }
|
nkeynes@586 | 283 |
|
nkeynes@359 | 284 | /**
|
nkeynes@359 | 285 | * Cut the specified block so that it has the given size, with the remaining data
|
nkeynes@359 | 286 | * forming a new free block. If the free block would be less than the minimum size,
|
nkeynes@359 | 287 | * the cut is not performed.
|
nkeynes@359 | 288 | * @return the next block after the (possibly cut) block.
|
nkeynes@359 | 289 | */
|
nkeynes@359 | 290 | static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
|
nkeynes@359 | 291 | {
|
nkeynes@407 | 292 | cutsize = (cutsize + 3) & 0xFFFFFFFC; // force word alignment
|
nkeynes@410 | 293 | assert( cutsize <= block->size );
|
nkeynes@359 | 294 | if( block->size > cutsize + MIN_TOTAL_SIZE ) {
|
nkeynes@736 | 295 | int oldsize = block->size;
|
nkeynes@736 | 296 | block->size = cutsize;
|
nkeynes@736 | 297 | xlat_cache_block_t next = NEXT(block);
|
nkeynes@736 | 298 | next->active = 0;
|
nkeynes@736 | 299 | next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
|
nkeynes@736 | 300 | return next;
|
nkeynes@359 | 301 | } else {
|
nkeynes@736 | 302 | return NEXT(block);
|
nkeynes@359 | 303 | }
|
nkeynes@359 | 304 | }
|
nkeynes@359 | 305 |
|
nkeynes@359 | 306 | /**
|
nkeynes@359 | 307 | * Promote a block in temp space (or elsewhere for that matter) to old space.
|
nkeynes@359 | 308 | *
|
nkeynes@359 | 309 | * @param block to promote.
|
nkeynes@359 | 310 | */
|
nkeynes@359 | 311 | static void xlat_promote_to_old_space( xlat_cache_block_t block )
|
nkeynes@359 | 312 | {
|
nkeynes@788 | 313 | int allocation = (int)-sizeof(struct xlat_cache_block);
|
nkeynes@359 | 314 | int size = block->size;
|
nkeynes@359 | 315 | xlat_cache_block_t curr = xlat_old_cache_ptr;
|
nkeynes@359 | 316 | xlat_cache_block_t start_block = curr;
|
nkeynes@359 | 317 | do {
|
nkeynes@736 | 318 | allocation += curr->size + sizeof(struct xlat_cache_block);
|
nkeynes@736 | 319 | curr = NEXT(curr);
|
nkeynes@736 | 320 | if( allocation > size ) {
|
nkeynes@736 | 321 | break; /* done */
|
nkeynes@736 | 322 | }
|
nkeynes@736 | 323 | if( curr->size == 0 ) { /* End-of-cache Sentinel */
|
nkeynes@736 | 324 | /* Leave what we just released as free space and start again from the
|
nkeynes@736 | 325 | * top of the cache
|
nkeynes@736 | 326 | */
|
nkeynes@736 | 327 | start_block->active = 0;
|
nkeynes@736 | 328 | start_block->size = allocation;
|
nkeynes@788 | 329 | allocation = (int)-sizeof(struct xlat_cache_block);
|
nkeynes@736 | 330 | start_block = curr = xlat_old_cache;
|
nkeynes@736 | 331 | }
|
nkeynes@359 | 332 | } while(1);
|
nkeynes@359 | 333 | start_block->active = 1;
|
nkeynes@359 | 334 | start_block->size = allocation;
|
nkeynes@359 | 335 | start_block->lut_entry = block->lut_entry;
|
nkeynes@901 | 336 | start_block->fpscr_mask = block->fpscr_mask;
|
nkeynes@901 | 337 | start_block->fpscr = block->fpscr;
|
nkeynes@596 | 338 | start_block->recover_table_offset = block->recover_table_offset;
|
nkeynes@596 | 339 | start_block->recover_table_size = block->recover_table_size;
|
nkeynes@359 | 340 | *block->lut_entry = &start_block->code;
|
nkeynes@359 | 341 | memcpy( start_block->code, block->code, block->size );
|
nkeynes@359 | 342 | xlat_old_cache_ptr = xlat_cut_block(start_block, size );
|
nkeynes@359 | 343 | if( xlat_old_cache_ptr->size == 0 ) {
|
nkeynes@736 | 344 | xlat_old_cache_ptr = xlat_old_cache;
|
nkeynes@359 | 345 | }
|
nkeynes@359 | 346 | }
|
nkeynes@359 | 347 |
|
nkeynes@359 | 348 | /**
|
nkeynes@359 | 349 | * Similarly to the above method, promotes a block to temp space.
|
nkeynes@359 | 350 | * TODO: Try to combine these - they're nearly identical
|
nkeynes@359 | 351 | */
|
nkeynes@359 | 352 | void xlat_promote_to_temp_space( xlat_cache_block_t block )
|
nkeynes@359 | 353 | {
|
nkeynes@359 | 354 | int size = block->size;
|
nkeynes@788 | 355 | int allocation = (int)-sizeof(struct xlat_cache_block);
|
nkeynes@359 | 356 | xlat_cache_block_t curr = xlat_temp_cache_ptr;
|
nkeynes@359 | 357 | xlat_cache_block_t start_block = curr;
|
nkeynes@359 | 358 | do {
|
nkeynes@736 | 359 | if( curr->active == BLOCK_USED ) {
|
nkeynes@736 | 360 | xlat_promote_to_old_space( curr );
|
nkeynes@736 | 361 | } else if( curr->active == BLOCK_ACTIVE ) {
|
nkeynes@736 | 362 | // Active but not used, release block
|
nkeynes@736 | 363 | *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03);
|
nkeynes@736 | 364 | }
|
nkeynes@736 | 365 | allocation += curr->size + sizeof(struct xlat_cache_block);
|
nkeynes@736 | 366 | curr = NEXT(curr);
|
nkeynes@736 | 367 | if( allocation > size ) {
|
nkeynes@736 | 368 | break; /* done */
|
nkeynes@736 | 369 | }
|
nkeynes@736 | 370 | if( curr->size == 0 ) { /* End-of-cache Sentinel */
|
nkeynes@736 | 371 | /* Leave what we just released as free space and start again from the
|
nkeynes@736 | 372 | * top of the cache
|
nkeynes@736 | 373 | */
|
nkeynes@736 | 374 | start_block->active = 0;
|
nkeynes@736 | 375 | start_block->size = allocation;
|
nkeynes@788 | 376 | allocation = (int)-sizeof(struct xlat_cache_block);
|
nkeynes@736 | 377 | start_block = curr = xlat_temp_cache;
|
nkeynes@736 | 378 | }
|
nkeynes@359 | 379 | } while(1);
|
nkeynes@359 | 380 | start_block->active = 1;
|
nkeynes@359 | 381 | start_block->size = allocation;
|
nkeynes@359 | 382 | start_block->lut_entry = block->lut_entry;
|
nkeynes@901 | 383 | start_block->fpscr_mask = block->fpscr_mask;
|
nkeynes@901 | 384 | start_block->fpscr = block->fpscr;
|
nkeynes@596 | 385 | start_block->recover_table_offset = block->recover_table_offset;
|
nkeynes@596 | 386 | start_block->recover_table_size = block->recover_table_size;
|
nkeynes@359 | 387 | *block->lut_entry = &start_block->code;
|
nkeynes@359 | 388 | memcpy( start_block->code, block->code, block->size );
|
nkeynes@359 | 389 | xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
|
nkeynes@359 | 390 | if( xlat_temp_cache_ptr->size == 0 ) {
|
nkeynes@736 | 391 | xlat_temp_cache_ptr = xlat_temp_cache;
|
nkeynes@359 | 392 | }
|
nkeynes@736 | 393 |
|
nkeynes@359 | 394 | }
|
nkeynes@359 | 395 |
|
nkeynes@359 | 396 | /**
|
nkeynes@359 | 397 | * Returns the next block in the new cache list that can be written to by the
|
nkeynes@359 | 398 | * translator. If the next block is active, it is evicted first.
|
nkeynes@359 | 399 | */
|
nkeynes@359 | 400 | xlat_cache_block_t xlat_start_block( sh4addr_t address )
|
nkeynes@359 | 401 | {
|
nkeynes@359 | 402 | if( xlat_new_cache_ptr->size == 0 ) {
|
nkeynes@736 | 403 | xlat_new_cache_ptr = xlat_new_cache;
|
nkeynes@359 | 404 | }
|
nkeynes@359 | 405 |
|
nkeynes@359 | 406 | if( xlat_new_cache_ptr->active ) {
|
nkeynes@736 | 407 | xlat_promote_to_temp_space( xlat_new_cache_ptr );
|
nkeynes@359 | 408 | }
|
nkeynes@359 | 409 | xlat_new_create_ptr = xlat_new_cache_ptr;
|
nkeynes@359 | 410 | xlat_new_create_ptr->active = 1;
|
nkeynes@359 | 411 | xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
|
nkeynes@359 | 412 |
|
nkeynes@359 | 413 | /* Add the LUT entry for the block */
|
nkeynes@359 | 414 | if( xlat_lut[XLAT_LUT_PAGE(address)] == NULL ) {
|
nkeynes@736 | 415 | xlat_lut[XLAT_LUT_PAGE(address)] =
|
nkeynes@736 | 416 | mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
|
nkeynes@736 | 417 | MAP_PRIVATE|MAP_ANON, -1, 0 );
|
nkeynes@736 | 418 | memset( xlat_lut[XLAT_LUT_PAGE(address)], 0, XLAT_LUT_PAGE_SIZE );
|
nkeynes@359 | 419 | }
|
nkeynes@359 | 420 |
|
nkeynes@359 | 421 | if( IS_ENTRY_POINT(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]) ) {
|
nkeynes@901 | 422 | xlat_cache_block_t oldblock = XLAT_BLOCK_FOR_CODE(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]);
|
nkeynes@736 | 423 | oldblock->active = 0;
|
nkeynes@359 | 424 | }
|
nkeynes@359 | 425 |
|
nkeynes@359 | 426 | xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)] =
|
nkeynes@736 | 427 | &xlat_new_create_ptr->code;
|
nkeynes@359 | 428 | xlat_new_create_ptr->lut_entry = xlat_lut[XLAT_LUT_PAGE(address)] + XLAT_LUT_ENTRY(address);
|
nkeynes@736 | 429 |
|
nkeynes@359 | 430 | return xlat_new_create_ptr;
|
nkeynes@359 | 431 | }
|
nkeynes@359 | 432 |
|
nkeynes@410 | 433 | xlat_cache_block_t xlat_extend_block( uint32_t newSize )
|
nkeynes@359 | 434 | {
|
nkeynes@410 | 435 | while( xlat_new_create_ptr->size < newSize ) {
|
nkeynes@736 | 436 | if( xlat_new_cache_ptr->size == 0 ) {
|
nkeynes@736 | 437 | /* Migrate to the front of the cache to keep it contiguous */
|
nkeynes@736 | 438 | xlat_new_create_ptr->active = 0;
|
nkeynes@736 | 439 | sh4ptr_t olddata = xlat_new_create_ptr->code;
|
nkeynes@736 | 440 | int oldsize = xlat_new_create_ptr->size;
|
nkeynes@736 | 441 | int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
|
nkeynes@736 | 442 | void **lut_entry = xlat_new_create_ptr->lut_entry;
|
nkeynes@788 | 443 | int allocation = (int)-sizeof(struct xlat_cache_block);
|
nkeynes@736 | 444 | xlat_new_cache_ptr = xlat_new_cache;
|
nkeynes@736 | 445 | do {
|
nkeynes@736 | 446 | if( xlat_new_cache_ptr->active ) {
|
nkeynes@736 | 447 | xlat_promote_to_temp_space( xlat_new_cache_ptr );
|
nkeynes@736 | 448 | }
|
nkeynes@736 | 449 | allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
|
nkeynes@736 | 450 | xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
|
nkeynes@736 | 451 | } while( allocation < size );
|
nkeynes@736 | 452 | xlat_new_create_ptr = xlat_new_cache;
|
nkeynes@736 | 453 | xlat_new_create_ptr->active = 1;
|
nkeynes@736 | 454 | xlat_new_create_ptr->size = allocation;
|
nkeynes@736 | 455 | xlat_new_create_ptr->lut_entry = lut_entry;
|
nkeynes@736 | 456 | *lut_entry = &xlat_new_create_ptr->code;
|
nkeynes@736 | 457 | memmove( xlat_new_create_ptr->code, olddata, oldsize );
|
nkeynes@736 | 458 | } else {
|
nkeynes@736 | 459 | if( xlat_new_cache_ptr->active ) {
|
nkeynes@736 | 460 | xlat_promote_to_temp_space( xlat_new_cache_ptr );
|
nkeynes@736 | 461 | }
|
nkeynes@736 | 462 | xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
|
nkeynes@736 | 463 | xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
|
nkeynes@736 | 464 | }
|
nkeynes@359 | 465 | }
|
nkeynes@359 | 466 | return xlat_new_create_ptr;
|
nkeynes@359 | 467 |
|
nkeynes@359 | 468 | }
|
nkeynes@359 | 469 |
|
nkeynes@359 | 470 | void xlat_commit_block( uint32_t destsize, uint32_t srcsize )
|
nkeynes@359 | 471 | {
|
nkeynes@359 | 472 | void **ptr = xlat_new_create_ptr->lut_entry;
|
nkeynes@359 | 473 | void **endptr = ptr + (srcsize>>2);
|
nkeynes@359 | 474 | while( ptr < endptr ) {
|
nkeynes@736 | 475 | if( *ptr == NULL ) {
|
nkeynes@736 | 476 | *ptr = XLAT_LUT_ENTRY_USED;
|
nkeynes@736 | 477 | }
|
nkeynes@736 | 478 | ptr++;
|
nkeynes@359 | 479 | }
|
nkeynes@359 | 480 |
|
nkeynes@359 | 481 | xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
|
nkeynes@359 | 482 | }
|
nkeynes@359 | 483 |
|
nkeynes@359 | 484 | void xlat_delete_block( xlat_cache_block_t block )
|
nkeynes@359 | 485 | {
|
nkeynes@359 | 486 | block->active = 0;
|
nkeynes@359 | 487 | *block->lut_entry = NULL;
|
nkeynes@359 | 488 | }
|
nkeynes@359 | 489 |
|
nkeynes@359 | 490 | void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
|
nkeynes@359 | 491 | {
|
nkeynes@359 | 492 | int foundptr = 0;
|
nkeynes@359 | 493 | xlat_cache_block_t tail =
|
nkeynes@736 | 494 | (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));
|
nkeynes@359 | 495 |
|
nkeynes@359 | 496 | assert( tail->active == 1 );
|
nkeynes@359 | 497 | assert( tail->size == 0 );
|
nkeynes@359 | 498 | while( cache < tail ) {
|
nkeynes@736 | 499 | assert( cache->active >= 0 && cache->active <= 2 );
|
nkeynes@736 | 500 | assert( cache->size >= 0 && cache->size < size );
|
nkeynes@736 | 501 | if( cache == ptr ) {
|
nkeynes@736 | 502 | foundptr = 1;
|
nkeynes@736 | 503 | }
|
nkeynes@736 | 504 | cache = NEXT(cache);
|
nkeynes@359 | 505 | }
|
nkeynes@359 | 506 | assert( cache == tail );
|
nkeynes@596 | 507 | assert( foundptr == 1 || tail == ptr );
|
nkeynes@359 | 508 | }
|
nkeynes@359 | 509 |
|
nkeynes@359 | 510 | void xlat_check_integrity( )
|
nkeynes@359 | 511 | {
|
nkeynes@359 | 512 | xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
|
nkeynes@359 | 513 | xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
|
nkeynes@359 | 514 | xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
|
nkeynes@359 | 515 | }
|
nkeynes@376 | 516 |
|