filename | src/xlat/xltcache.c |
changeset | 1149:da6124fceec6 |
prev | 1126:1f2c7cdee73e |
next | 1175:712c418cad83 |
author | nkeynes |
date | Wed Nov 10 08:37:42 2010 +1000 (11 years ago) |
permissions | -rw-r--r-- |
last change | Add chain pointer to the xlat cache, so that we can maintain multiple blocks for the same address. This prevents thrashing in cases where we would otherwise keep retranslating the same blocks over and over again due to varying xlat_sh4_mode values |
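The change described in the header threads every translated block for a given SH4 address onto a per-address chain (the new chain field used throughout this revision). Purely as an illustrative sketch, not code from this file: a caller holding a LUT entry point could walk that chain to pick the block translated for the current mode. The field name xlat_sh4_mode is assumed from the changeset comment; XLAT_BLOCK_FOR_CODE and chain come from xltcache.h as used in the source below.

    /* Illustrative sketch only - not part of xltcache.c. */
    #include "xlat/xltcache.h"   /* xlat_cache_block_t, XLAT_BLOCK_FOR_CODE */

    static void *xlat_find_code_for_mode( void *entry_point, uint32_t mode )
    {
        void *p = entry_point;
        while( p != NULL ) {
            xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
            if( block->xlat_sh4_mode == mode ) {   /* assumed field name */
                return p;                          /* reuse this translation */
            }
            p = block->chain;                      /* next block for the same address */
        }
        return NULL;                               /* nothing suitable - translate afresh */
    }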
nkeynes@991 | 1 | /** |
nkeynes@991 | 2 | * $Id$ |
nkeynes@991 | 3 | * |
nkeynes@991 | 4 | * Translation cache management. This part is architecture independent. |
nkeynes@991 | 5 | * |
nkeynes@991 | 6 | * Copyright (c) 2005 Nathan Keynes. |
nkeynes@991 | 7 | * |
nkeynes@991 | 8 | * This program is free software; you can redistribute it and/or modify |
nkeynes@991 | 9 | * it under the terms of the GNU General Public License as published by |
nkeynes@991 | 10 | * the Free Software Foundation; either version 2 of the License, or |
nkeynes@991 | 11 | * (at your option) any later version. |
nkeynes@991 | 12 | * |
nkeynes@991 | 13 | * This program is distributed in the hope that it will be useful, |
nkeynes@991 | 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
nkeynes@991 | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
nkeynes@991 | 16 | * GNU General Public License for more details. |
nkeynes@991 | 17 | */ |
nkeynes@991 | 18 | |
nkeynes@991 | 19 | #include <sys/types.h> |
nkeynes@991 | 20 | #include <sys/mman.h> |
nkeynes@991 | 21 | #include <assert.h> |
nkeynes@991 | 22 | |
nkeynes@991 | 23 | #include "dreamcast.h" |
nkeynes@991 | 24 | #include "sh4/sh4core.h" |
nkeynes@991 | 25 | #include "xlat/xltcache.h" |
nkeynes@991 | 26 | #include "x86dasm/x86dasm.h" |
nkeynes@991 | 27 | |
nkeynes@991 | 28 | #define XLAT_LUT_PAGE_BITS 12 |
nkeynes@991 | 29 | #define XLAT_LUT_TOTAL_BITS 28 |
nkeynes@991 | 30 | #define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF) |
nkeynes@991 | 31 | #define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1) |
nkeynes@991 | 32 | |
nkeynes@991 | 33 | #define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS)) |
nkeynes@991 | 34 | #define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS) |
nkeynes@991 | 35 | #define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *)) |
nkeynes@991 | 36 | |
nkeynes@991 | 37 | #define XLAT_LUT_ENTRY_EMPTY (void *)0 |
nkeynes@991 | 38 | #define XLAT_LUT_ENTRY_USED (void *)1 |
nkeynes@991 | 39 | |
nkeynes@991 | 40 | #define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size])) |
nkeynes@991 | 41 | #define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED) |
nkeynes@991 | 42 | #define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY) |
nkeynes@991 | 43 | |
nkeynes@991 | 44 | #define MIN_BLOCK_SIZE 32 |
nkeynes@991 | 45 | #define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE) |
nkeynes@991 | 46 | |
nkeynes@991 | 47 | #define BLOCK_INACTIVE 0 |
nkeynes@991 | 48 | #define BLOCK_ACTIVE 1 |
nkeynes@991 | 49 | #define BLOCK_USED 2 |
nkeynes@991 | 50 | |
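As a quick illustration of the LUT geometry defined above (figures derived from the macros, example address arbitrary): each LUT page covers 8KB of SH4 address space, i.e. 4096 two-byte instruction words, and the low bits of a stored code pointer are reserved for flags such as XLAT_LUT_ENTRY_USED (hence the ~0x03 mask in xlat_get_code below).

    /* Standalone sketch: split an address the same way as
     * XLAT_LUT_PAGE()/XLAT_LUT_ENTRY() above.               */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t addr  = 0x8C010000;              /* arbitrary example address */
        uint32_t page  = (addr >> 13) & 0xFFFF;   /* XLAT_LUT_PAGE(addr)  */
        uint32_t entry = (addr & 0x1FFE) >> 1;    /* XLAT_LUT_ENTRY(addr) */
        printf( "page=0x%04x entry=%u\n", page, entry );
        return 0;
    }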
nkeynes@991 | 51 | xlat_cache_block_t xlat_new_cache; |
nkeynes@991 | 52 | xlat_cache_block_t xlat_new_cache_ptr; |
nkeynes@991 | 53 | xlat_cache_block_t xlat_new_create_ptr; |
nkeynes@991 | 54 | |
nkeynes@991 | 55 | #ifdef XLAT_GENERATIONAL_CACHE |
nkeynes@991 | 56 | xlat_cache_block_t xlat_temp_cache; |
nkeynes@991 | 57 | xlat_cache_block_t xlat_temp_cache_ptr; |
nkeynes@991 | 58 | xlat_cache_block_t xlat_old_cache; |
nkeynes@991 | 59 | xlat_cache_block_t xlat_old_cache_ptr; |
nkeynes@991 | 60 | #endif |
nkeynes@991 | 61 | |
nkeynes@991 | 62 | static void **xlat_lut[XLAT_LUT_PAGES]; |
nkeynes@991 | 63 | static gboolean xlat_initialized = FALSE; |
nkeynes@991 | 64 | |
nkeynes@991 | 65 | void xlat_cache_init(void) |
nkeynes@991 | 66 | { |
nkeynes@991 | 67 | if( !xlat_initialized ) { |
nkeynes@991 | 68 | xlat_initialized = TRUE; |
nkeynes@991 | 69 | xlat_new_cache = (xlat_cache_block_t)mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE, |
nkeynes@991 | 70 | MAP_PRIVATE|MAP_ANON, -1, 0 ); |
nkeynes@991 | 71 | xlat_new_cache_ptr = xlat_new_cache; |
nkeynes@991 | 72 | xlat_new_create_ptr = xlat_new_cache; |
nkeynes@991 | 73 | #ifdef XLAT_GENERATIONAL_CACHE |
nkeynes@991 | 74 | xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE, |
nkeynes@991 | 75 | MAP_PRIVATE|MAP_ANON, -1, 0 ); |
nkeynes@991 | 76 | xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE, |
nkeynes@991 | 77 | MAP_PRIVATE|MAP_ANON, -1, 0 ); |
nkeynes@991 | 78 | xlat_temp_cache_ptr = xlat_temp_cache; |
nkeynes@991 | 79 | xlat_old_cache_ptr = xlat_old_cache; |
nkeynes@991 | 80 | #endif |
nkeynes@991 | 81 | // xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE, |
nkeynes@991 | 82 | // MAP_PRIVATE|MAP_ANON, -1, 0); |
nkeynes@991 | 83 | memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) ); |
nkeynes@991 | 84 | } |
nkeynes@991 | 85 | xlat_flush_cache(); |
nkeynes@991 | 86 | } |
nkeynes@991 | 87 | |
nkeynes@991 | 88 | /** |
nkeynes@991 | 89 | * Reset the cache structure to its default state |
nkeynes@991 | 90 | */ |
nkeynes@991 | 91 | void xlat_flush_cache() |
nkeynes@991 | 92 | { |
nkeynes@991 | 93 | xlat_cache_block_t tmp; |
nkeynes@991 | 94 | int i; |
nkeynes@991 | 95 | xlat_new_cache_ptr = xlat_new_cache; |
nkeynes@991 | 96 | xlat_new_cache_ptr->active = 0; |
nkeynes@991 | 97 | xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block); |
nkeynes@991 | 98 | tmp = NEXT(xlat_new_cache_ptr); |
nkeynes@991 | 99 | tmp->active = 1; |
nkeynes@991 | 100 | tmp->size = 0; |
nkeynes@991 | 101 | #ifdef XLAT_GENERATIONAL_CACHE |
nkeynes@991 | 102 | xlat_temp_cache_ptr = xlat_temp_cache; |
nkeynes@991 | 103 | xlat_temp_cache_ptr->active = 0; |
nkeynes@991 | 104 | xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block); |
nkeynes@991 | 105 | tmp = NEXT(xlat_temp_cache_ptr); |
nkeynes@991 | 106 | tmp->active = 1; |
nkeynes@991 | 107 | tmp->size = 0; |
nkeynes@991 | 108 | xlat_old_cache_ptr = xlat_old_cache; |
nkeynes@991 | 109 | xlat_old_cache_ptr->active = 0; |
nkeynes@991 | 110 | xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block); |
nkeynes@991 | 111 | tmp = NEXT(xlat_old_cache_ptr); |
nkeynes@991 | 112 | tmp->active = 1; |
nkeynes@991 | 113 | tmp->size = 0; |
nkeynes@991 | 114 | #endif |
nkeynes@991 | 115 | for( i=0; i<XLAT_LUT_PAGES; i++ ) { |
nkeynes@991 | 116 | if( xlat_lut[i] != NULL ) { |
nkeynes@991 | 117 | memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE ); |
nkeynes@991 | 118 | } |
nkeynes@991 | 119 | } |
nkeynes@991 | 120 | } |
nkeynes@991 | 121 | |
nkeynes@991 | 122 | static void xlat_flush_page_by_lut( void **page ) |
nkeynes@991 | 123 | { |
nkeynes@991 | 124 | int i; |
nkeynes@991 | 125 | for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) { |
nkeynes@991 | 126 | if( IS_ENTRY_POINT(page[i]) ) { |
nkeynes@1149 | 127 | void *p = page[i]; |
nkeynes@1149 | 128 | do { |
nkeynes@1149 | 129 | xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p); |
nkeynes@1149 | 130 | block->active = 0; |
nkeynes@1149 | 131 | p = block->chain; |
nkeynes@1149 | 132 | } while( p != NULL ); |
nkeynes@991 | 133 | } |
nkeynes@991 | 134 | page[i] = NULL; |
nkeynes@991 | 135 | } |
nkeynes@991 | 136 | } |
nkeynes@991 | 137 | |
nkeynes@991 | 138 | void FASTCALL xlat_invalidate_word( sh4addr_t addr ) |
nkeynes@991 | 139 | { |
nkeynes@991 | 140 | void **page = xlat_lut[XLAT_LUT_PAGE(addr)]; |
nkeynes@991 | 141 | if( page != NULL ) { |
nkeynes@991 | 142 | int entry = XLAT_LUT_ENTRY(addr); |
nkeynes@991 | 143 | if( page[entry] != NULL ) { |
nkeynes@991 | 144 | xlat_flush_page_by_lut(page); |
nkeynes@991 | 145 | } |
nkeynes@991 | 146 | } |
nkeynes@991 | 147 | } |
nkeynes@991 | 148 | |
nkeynes@991 | 149 | void FASTCALL xlat_invalidate_long( sh4addr_t addr ) |
nkeynes@991 | 150 | { |
nkeynes@991 | 151 | void **page = xlat_lut[XLAT_LUT_PAGE(addr)]; |
nkeynes@991 | 152 | if( page != NULL ) { |
nkeynes@991 | 153 | int entry = XLAT_LUT_ENTRY(addr); |
nkeynes@991 | 154 | if( *(uint64_t *)&page[entry] != 0 ) { |
nkeynes@991 | 155 | xlat_flush_page_by_lut(page); |
nkeynes@991 | 156 | } |
nkeynes@991 | 157 | } |
nkeynes@991 | 158 | } |
nkeynes@991 | 159 | |
nkeynes@991 | 160 | void FASTCALL xlat_invalidate_block( sh4addr_t address, size_t size ) |
nkeynes@991 | 161 | { |
nkeynes@991 | 162 | int i; |
nkeynes@991 | 163 | int entry_count = size >> 1; // words; |
nkeynes@991 | 164 | uint32_t page_no = XLAT_LUT_PAGE(address); |
nkeynes@991 | 165 | int entry = XLAT_LUT_ENTRY(address); |
nkeynes@991 | 166 | do { |
nkeynes@991 | 167 | void **page = xlat_lut[page_no]; |
nkeynes@991 | 168 | int page_entries = XLAT_LUT_PAGE_ENTRIES - entry; |
nkeynes@991 | 169 | if( entry_count < page_entries ) { |
nkeynes@991 | 170 | page_entries = entry_count; |
nkeynes@991 | 171 | } |
nkeynes@991 | 172 | if( page != NULL ) { |
nkeynes@991 | 173 | if( page_entries == XLAT_LUT_PAGE_ENTRIES ) { |
nkeynes@991 | 174 | /* Overwriting the entire page anyway */ |
nkeynes@991 | 175 | xlat_flush_page_by_lut(page); |
nkeynes@991 | 176 | } else { |
nkeynes@991 | 177 | for( i=entry; i<entry+page_entries; i++ ) { |
nkeynes@991 | 178 | if( page[i] != NULL ) { |
nkeynes@991 | 179 | xlat_flush_page_by_lut(page); |
nkeynes@991 | 180 | break; |
nkeynes@991 | 181 | } |
nkeynes@991 | 182 | } |
nkeynes@991 | 183 | } |
nkeynes@991 | 185 | } |
nkeynes@991 | 186 | page_no ++; |
nkeynes@991 | 187 | entry_count -= page_entries; |
nkeynes@991 | 188 | entry = 0; |
nkeynes@991 | 189 | } while( entry_count > 0 ); |
nkeynes@991 | 190 | } |
nkeynes@991 | 191 | |
nkeynes@991 | 192 | void FASTCALL xlat_flush_page( sh4addr_t address ) |
nkeynes@991 | 193 | { |
nkeynes@991 | 194 | void **page = xlat_lut[XLAT_LUT_PAGE(address)]; |
nkeynes@991 | 195 | if( page != NULL ) { |
nkeynes@991 | 196 | xlat_flush_page_by_lut(page); |
nkeynes@991 | 197 | } |
nkeynes@991 | 198 | } |
nkeynes@991 | 199 | |
nkeynes@991 | 200 | void * FASTCALL xlat_get_code( sh4addr_t address ) |
nkeynes@991 | 201 | { |
nkeynes@991 | 202 | void *result = NULL; |
nkeynes@991 | 203 | void **page = xlat_lut[XLAT_LUT_PAGE(address)]; |
nkeynes@991 | 204 | if( page != NULL ) { |
nkeynes@991 | 205 | result = (void *)(((uintptr_t)(page[XLAT_LUT_ENTRY(address)])) & (~((uintptr_t)0x03))); |
nkeynes@991 | 206 | } |
nkeynes@991 | 207 | return result; |
nkeynes@991 | 208 | } |
nkeynes@991 | 209 | |
nkeynes@991 | 210 | xlat_recovery_record_t xlat_get_pre_recovery( void *code, void *native_pc ) |
nkeynes@991 | 211 | { |
nkeynes@991 | 212 | if( code != NULL ) { |
nkeynes@991 | 213 | uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code); |
nkeynes@991 | 214 | xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code); |
nkeynes@991 | 215 | uint32_t count = block->recover_table_size; |
nkeynes@991 | 216 | xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]); |
nkeynes@991 | 217 | uint32_t posn; |
nkeynes@991 | 218 | for( posn = 1; posn < count; posn++ ) { |
nkeynes@1003 | 219 | if( records[posn].xlat_offset >= pc_offset ) { |
nkeynes@991 | 220 | return &records[posn-1]; |
nkeynes@991 | 221 | } |
nkeynes@991 | 222 | } |
nkeynes@991 | 223 | return &records[count-1]; |
nkeynes@991 | 224 | } |
nkeynes@991 | 225 | return NULL; |
nkeynes@991 | 226 | } |
nkeynes@991 | 227 | |
nkeynes@991 | 228 | void ** FASTCALL xlat_get_lut_entry( sh4addr_t address ) |
nkeynes@991 | 229 | { |
nkeynes@991 | 230 | void **page = xlat_lut[XLAT_LUT_PAGE(address)]; |
nkeynes@991 | 231 | |
nkeynes@991 | 232 | /* Add the LUT entry for the block */ |
nkeynes@991 | 233 | if( page == NULL ) { |
nkeynes@991 | 234 | xlat_lut[XLAT_LUT_PAGE(address)] = page = |
nkeynes@991 | 235 | (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE, |
nkeynes@991 | 236 | MAP_PRIVATE|MAP_ANON, -1, 0 ); |
nkeynes@991 | 237 | memset( page, 0, XLAT_LUT_PAGE_SIZE ); |
nkeynes@991 | 238 | } |
nkeynes@991 | 239 | |
nkeynes@991 | 240 | return &page[XLAT_LUT_ENTRY(address)]; |
nkeynes@991 | 241 | } |
nkeynes@991 | 242 | |
nkeynes@991 | 243 | |
nkeynes@991 | 244 | |
nkeynes@991 | 245 | uint32_t FASTCALL xlat_get_block_size( void *block ) |
nkeynes@991 | 246 | { |
nkeynes@991 | 247 | xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block)); |
nkeynes@991 | 248 | return xlt->size; |
nkeynes@991 | 249 | } |
nkeynes@991 | 250 | |
nkeynes@991 | 251 | uint32_t FASTCALL xlat_get_code_size( void *block ) |
nkeynes@991 | 252 | { |
nkeynes@991 | 253 | xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block)); |
nkeynes@991 | 254 | if( xlt->recover_table_offset == 0 ) { |
nkeynes@991 | 255 | return xlt->size; |
nkeynes@991 | 256 | } else { |
nkeynes@991 | 257 | return xlt->recover_table_offset; |
nkeynes@991 | 258 | } |
nkeynes@991 | 259 | } |
nkeynes@991 | 260 | |
nkeynes@991 | 261 | /** |
nkeynes@991 | 262 | * Cut the specified block so that it has the given size, with the remaining data |
nkeynes@991 | 263 | * forming a new free block. If the free block would be less than the minimum size, |
nkeynes@991 | 264 | * the cut is not performed. |
nkeynes@991 | 265 | * @return the next block after the (possibly cut) block. |
nkeynes@991 | 266 | */ |
nkeynes@991 | 267 | static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize ) |
nkeynes@991 | 268 | { |
nkeynes@991 | 269 | cutsize = (cutsize + 3) & 0xFFFFFFFC; // force word alignment |
nkeynes@991 | 270 | assert( cutsize <= block->size ); |
nkeynes@991 | 271 | if( block->size > cutsize + MIN_TOTAL_SIZE ) { |
nkeynes@991 | 272 | int oldsize = block->size; |
nkeynes@991 | 273 | block->size = cutsize; |
nkeynes@991 | 274 | xlat_cache_block_t next = NEXT(block); |
nkeynes@991 | 275 | next->active = 0; |
nkeynes@991 | 276 | next->size = oldsize - cutsize - sizeof(struct xlat_cache_block); |
nkeynes@991 | 277 | return next; |
nkeynes@991 | 278 | } else { |
nkeynes@991 | 279 | return NEXT(block); |
nkeynes@991 | 280 | } |
nkeynes@991 | 281 | } |
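Worked example with illustrative figures: cutting a 4096-byte block to cutsize 45 first rounds cutsize up to 48 for word alignment; since 4096 exceeds 48 + MIN_TOTAL_SIZE, the block's size becomes 48 and the space behind it becomes an inactive block of 4096 - 48 - sizeof(struct xlat_cache_block) bytes, which is what NEXT() then returns. If the leftover did not exceed MIN_TOTAL_SIZE, the block would keep its full 4096-byte size and NEXT() would simply land on the following block.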
nkeynes@991 | 282 | |
nkeynes@991 | 283 | #ifdef XLAT_GENERATIONAL_CACHE |
nkeynes@991 | 284 | /** |
nkeynes@991 | 285 | * Promote a block in temp space (or elsewhere for that matter) to old space. |
nkeynes@991 | 286 | * |
nkeynes@991 | 287 | * @param block to promote. |
nkeynes@991 | 288 | */ |
nkeynes@991 | 289 | static void xlat_promote_to_old_space( xlat_cache_block_t block ) |
nkeynes@991 | 290 | { |
nkeynes@991 | 291 | int allocation = (int)-sizeof(struct xlat_cache_block); |
nkeynes@991 | 292 | int size = block->size; |
nkeynes@991 | 293 | xlat_cache_block_t curr = xlat_old_cache_ptr; |
nkeynes@991 | 294 | xlat_cache_block_t start_block = curr; |
nkeynes@991 | 295 | do { |
nkeynes@991 | 296 | allocation += curr->size + sizeof(struct xlat_cache_block); |
nkeynes@991 | 297 | curr = NEXT(curr); |
nkeynes@991 | 298 | if( allocation > size ) { |
nkeynes@991 | 299 | break; /* done */ |
nkeynes@991 | 300 | } |
nkeynes@991 | 301 | if( curr->size == 0 ) { /* End-of-cache Sentinel */ |
nkeynes@991 | 302 | /* Leave what we just released as free space and start again from the |
nkeynes@991 | 303 | * top of the cache |
nkeynes@991 | 304 | */ |
nkeynes@991 | 305 | start_block->active = 0; |
nkeynes@991 | 306 | start_block->size = allocation; |
nkeynes@991 | 307 | allocation = (int)-sizeof(struct xlat_cache_block); |
nkeynes@991 | 308 | start_block = curr = xlat_old_cache; |
nkeynes@991 | 309 | } |
nkeynes@991 | 310 | } while(1); |
nkeynes@991 | 311 | start_block->active = 1; |
nkeynes@991 | 312 | start_block->size = allocation; |
nkeynes@991 | 313 | start_block->lut_entry = block->lut_entry; |
nkeynes@1149 | 314 | start_block->chain = block->chain; |
nkeynes@991 | 315 | start_block->fpscr_mask = block->fpscr_mask; |
nkeynes@991 | 316 | start_block->fpscr = block->fpscr; |
nkeynes@991 | 317 | start_block->recover_table_offset = block->recover_table_offset; |
nkeynes@991 | 318 | start_block->recover_table_size = block->recover_table_size; |
nkeynes@991 | 319 | *block->lut_entry = &start_block->code; |
nkeynes@991 | 320 | memcpy( start_block->code, block->code, block->size ); |
nkeynes@991 | 321 | xlat_old_cache_ptr = xlat_cut_block(start_block, size ); |
nkeynes@991 | 322 | if( xlat_old_cache_ptr->size == 0 ) { |
nkeynes@991 | 323 | xlat_old_cache_ptr = xlat_old_cache; |
nkeynes@991 | 324 | } |
nkeynes@991 | 325 | } |
nkeynes@991 | 326 | |
nkeynes@991 | 327 | /** |
nkeynes@991 | 328 | * Similarly to the above method, promotes a block to temp space. |
nkeynes@991 | 329 | * TODO: Try to combine these - they're nearly identical |
nkeynes@991 | 330 | */ |
nkeynes@991 | 331 | void xlat_promote_to_temp_space( xlat_cache_block_t block ) |
nkeynes@991 | 332 | { |
nkeynes@991 | 333 | int size = block->size; |
nkeynes@991 | 334 | int allocation = (int)-sizeof(struct xlat_cache_block); |
nkeynes@991 | 335 | xlat_cache_block_t curr = xlat_temp_cache_ptr; |
nkeynes@991 | 336 | xlat_cache_block_t start_block = curr; |
nkeynes@991 | 337 | do { |
nkeynes@991 | 338 | if( curr->active == BLOCK_USED ) { |
nkeynes@991 | 339 | xlat_promote_to_old_space( curr ); |
nkeynes@991 | 340 | } else if( curr->active == BLOCK_ACTIVE ) { |
nkeynes@991 | 341 | // Active but not used, release block |
nkeynes@991 | 342 | *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03); |
nkeynes@991 | 343 | } |
nkeynes@991 | 344 | allocation += curr->size + sizeof(struct xlat_cache_block); |
nkeynes@991 | 345 | curr = NEXT(curr); |
nkeynes@991 | 346 | if( allocation > size ) { |
nkeynes@991 | 347 | break; /* done */ |
nkeynes@991 | 348 | } |
nkeynes@991 | 349 | if( curr->size == 0 ) { /* End-of-cache Sentinel */ |
nkeynes@991 | 350 | /* Leave what we just released as free space and start again from the |
nkeynes@991 | 351 | * top of the cache |
nkeynes@991 | 352 | */ |
nkeynes@991 | 353 | start_block->active = 0; |
nkeynes@991 | 354 | start_block->size = allocation; |
nkeynes@991 | 355 | allocation = (int)-sizeof(struct xlat_cache_block); |
nkeynes@991 | 356 | start_block = curr = xlat_temp_cache; |
nkeynes@991 | 357 | } |
nkeynes@991 | 358 | } while(1); |
nkeynes@991 | 359 | start_block->active = 1; |
nkeynes@991 | 360 | start_block->size = allocation; |
nkeynes@991 | 361 | start_block->lut_entry = block->lut_entry; |
nkeynes@1149 | 362 | start_block->chain = block->chain; |
nkeynes@991 | 363 | start_block->fpscr_mask = block->fpscr_mask; |
nkeynes@991 | 364 | start_block->fpscr = block->fpscr; |
nkeynes@991 | 365 | start_block->recover_table_offset = block->recover_table_offset; |
nkeynes@991 | 366 | start_block->recover_table_size = block->recover_table_size; |
nkeynes@991 | 367 | *block->lut_entry = &start_block->code; |
nkeynes@991 | 368 | memcpy( start_block->code, block->code, block->size ); |
nkeynes@991 | 369 | xlat_temp_cache_ptr = xlat_cut_block(start_block, size ); |
nkeynes@991 | 370 | if( xlat_temp_cache_ptr->size == 0 ) { |
nkeynes@991 | 371 | xlat_temp_cache_ptr = xlat_temp_cache; |
nkeynes@991 | 372 | } |
nkeynes@991 | 373 | |
nkeynes@991 | 374 | } |
nkeynes@991 | 375 | #else |
nkeynes@991 | 376 | void xlat_promote_to_temp_space( xlat_cache_block_t block ) |
nkeynes@991 | 377 | { |
nkeynes@991 | 378 | *block->lut_entry = 0; |
nkeynes@991 | 379 | } |
nkeynes@991 | 380 | #endif |
nkeynes@991 | 381 | |
nkeynes@991 | 382 | /** |
nkeynes@991 | 383 | * Returns the next block in the new cache list that can be written to by the |
nkeynes@991 | 384 | * translator. If the next block is active, it is evicted first. |
nkeynes@991 | 385 | */ |
nkeynes@991 | 386 | xlat_cache_block_t xlat_start_block( sh4addr_t address ) |
nkeynes@991 | 387 | { |
nkeynes@991 | 388 | if( xlat_new_cache_ptr->size == 0 ) { |
nkeynes@991 | 389 | xlat_new_cache_ptr = xlat_new_cache; |
nkeynes@991 | 390 | } |
nkeynes@991 | 391 | |
nkeynes@991 | 392 | if( xlat_new_cache_ptr->active ) { |
nkeynes@991 | 393 | xlat_promote_to_temp_space( xlat_new_cache_ptr ); |
nkeynes@991 | 394 | } |
nkeynes@991 | 395 | xlat_new_create_ptr = xlat_new_cache_ptr; |
nkeynes@991 | 396 | xlat_new_create_ptr->active = 1; |
nkeynes@991 | 397 | xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr); |
nkeynes@991 | 398 | |
nkeynes@991 | 399 | /* Add the LUT entry for the block */ |
nkeynes@991 | 400 | if( xlat_lut[XLAT_LUT_PAGE(address)] == NULL ) { |
nkeynes@991 | 401 | xlat_lut[XLAT_LUT_PAGE(address)] = |
nkeynes@991 | 402 | (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE, |
nkeynes@991 | 403 | MAP_PRIVATE|MAP_ANON, -1, 0 ); |
nkeynes@991 | 404 | memset( xlat_lut[XLAT_LUT_PAGE(address)], 0, XLAT_LUT_PAGE_SIZE ); |
nkeynes@991 | 405 | } |
nkeynes@991 | 406 | |
nkeynes@991 | 407 | if( IS_ENTRY_POINT(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]) ) { |
nkeynes@1149 | 408 | void *p = xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]; |
nkeynes@1149 | 409 | xlat_cache_block_t oldblock = XLAT_BLOCK_FOR_CODE(p); |
nkeynes@1149 | 410 | assert( oldblock->active ); |
nkeynes@1149 | 411 | xlat_new_create_ptr->chain = p; |
nkeynes@1149 | 412 | } else { |
nkeynes@1149 | 413 | xlat_new_create_ptr->chain = NULL; |
nkeynes@991 | 414 | } |
nkeynes@991 | 415 | |
nkeynes@991 | 416 | xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)] = |
nkeynes@991 | 417 | &xlat_new_create_ptr->code; |
nkeynes@991 | 418 | xlat_new_create_ptr->lut_entry = xlat_lut[XLAT_LUT_PAGE(address)] + XLAT_LUT_ENTRY(address); |
nkeynes@991 | 419 | |
nkeynes@991 | 420 | return xlat_new_create_ptr; |
nkeynes@991 | 421 | } |
nkeynes@991 | 422 | |
nkeynes@991 | 423 | xlat_cache_block_t xlat_extend_block( uint32_t newSize ) |
nkeynes@991 | 424 | { |
nkeynes@991 | 425 | while( xlat_new_create_ptr->size < newSize ) { |
nkeynes@991 | 426 | if( xlat_new_cache_ptr->size == 0 ) { |
nkeynes@991 | 427 | /* Migrate to the front of the cache to keep it contiguous */ |
nkeynes@991 | 428 | xlat_new_create_ptr->active = 0; |
nkeynes@991 | 429 | sh4ptr_t olddata = xlat_new_create_ptr->code; |
nkeynes@991 | 430 | int oldsize = xlat_new_create_ptr->size; |
nkeynes@991 | 431 | int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */ |
nkeynes@991 | 432 | void **lut_entry = xlat_new_create_ptr->lut_entry; |
nkeynes@1149 | 433 | void *chain = xlat_new_create_ptr->chain; |
nkeynes@991 | 434 | int allocation = (int)-sizeof(struct xlat_cache_block); |
nkeynes@991 | 435 | xlat_new_cache_ptr = xlat_new_cache; |
nkeynes@991 | 436 | do { |
nkeynes@991 | 437 | if( xlat_new_cache_ptr->active ) { |
nkeynes@991 | 438 | xlat_promote_to_temp_space( xlat_new_cache_ptr ); |
nkeynes@991 | 439 | } |
nkeynes@991 | 440 | allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block); |
nkeynes@991 | 441 | xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr); |
nkeynes@991 | 442 | } while( allocation < size ); |
nkeynes@991 | 443 | xlat_new_create_ptr = xlat_new_cache; |
nkeynes@991 | 444 | xlat_new_create_ptr->active = 1; |
nkeynes@991 | 445 | xlat_new_create_ptr->size = allocation; |
nkeynes@991 | 446 | xlat_new_create_ptr->lut_entry = lut_entry; |
nkeynes@1149 | 447 | xlat_new_create_ptr->chain = chain; |
nkeynes@991 | 448 | *lut_entry = &xlat_new_create_ptr->code; |
nkeynes@991 | 449 | memmove( xlat_new_create_ptr->code, olddata, oldsize ); |
nkeynes@991 | 450 | } else { |
nkeynes@991 | 451 | if( xlat_new_cache_ptr->active ) { |
nkeynes@991 | 452 | xlat_promote_to_temp_space( xlat_new_cache_ptr ); |
nkeynes@991 | 453 | } |
nkeynes@991 | 454 | xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block); |
nkeynes@991 | 455 | xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr); |
nkeynes@991 | 456 | } |
nkeynes@991 | 457 | } |
nkeynes@991 | 458 | return xlat_new_create_ptr; |
nkeynes@991 | 459 | |
nkeynes@991 | 460 | } |
nkeynes@991 | 461 | |
nkeynes@991 | 462 | void xlat_commit_block( uint32_t destsize, uint32_t srcsize ) |
nkeynes@991 | 463 | { |
nkeynes@991 | 464 | void **ptr = xlat_new_create_ptr->lut_entry; |
nkeynes@1126 | 465 | void **endptr = ptr + (srcsize>>1); |
nkeynes@991 | 466 | while( ptr < endptr ) { |
nkeynes@991 | 467 | if( *ptr == NULL ) { |
nkeynes@991 | 468 | *ptr = XLAT_LUT_ENTRY_USED; |
nkeynes@991 | 469 | } |
nkeynes@991 | 470 | ptr++; |
nkeynes@991 | 471 | } |
nkeynes@991 | 472 | |
nkeynes@991 | 473 | xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize ); |
nkeynes@991 | 474 | } |
nkeynes@991 | 475 | |
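For context, a hedged sketch of how a translator front-end might drive the functions above. The emission step is elided, get_or_translate and both byte counts are hypothetical placeholders, and lxdream's real SH4 translator is considerably more involved; sh4addr_t comes from the project headers.

    #include "xlat/xltcache.h"

    /* Hypothetical driver: look up a translation, creating one if missing. */
    void *get_or_translate( sh4addr_t pc )
    {
        void *code = xlat_get_code( pc );          /* fast path: already translated */
        if( code == NULL ) {
            xlat_cache_block_t block = xlat_start_block( pc );
            /* ...emit host code into block->code here, calling
             *    block = xlat_extend_block( bytes_needed );
             * whenever the block runs out of room (note it may move)... */
            uint32_t host_bytes = 0;   /* placeholder: host code emitted */
            uint32_t sh4_bytes  = 2;   /* placeholder: SH4 bytes covered */
            xlat_commit_block( host_bytes, sh4_bytes );
            code = xlat_get_code( pc );            /* LUT now points at the block */
        }
        return code;
    }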
nkeynes@991 | 476 | void xlat_delete_block( xlat_cache_block_t block ) |
nkeynes@991 | 477 | { |
nkeynes@991 | 478 | block->active = 0; |
nkeynes@991 | 479 | *block->lut_entry = NULL; |
nkeynes@991 | 480 | } |
nkeynes@991 | 481 | |
nkeynes@991 | 482 | void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size ) |
nkeynes@991 | 483 | { |
nkeynes@991 | 484 | int foundptr = 0; |
nkeynes@991 | 485 | xlat_cache_block_t tail = |
nkeynes@991 | 486 | (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block)); |
nkeynes@991 | 487 | |
nkeynes@991 | 488 | assert( tail->active == 1 ); |
nkeynes@991 | 489 | assert( tail->size == 0 ); |
nkeynes@991 | 490 | while( cache < tail ) { |
nkeynes@991 | 491 | assert( cache->active >= 0 && cache->active <= 2 ); |
nkeynes@991 | 492 | assert( cache->size >= 0 && cache->size < size ); |
nkeynes@991 | 493 | if( cache == ptr ) { |
nkeynes@991 | 494 | foundptr = 1; |
nkeynes@991 | 495 | } |
nkeynes@991 | 496 | cache = NEXT(cache); |
nkeynes@991 | 497 | } |
nkeynes@991 | 498 | assert( cache == tail ); |
nkeynes@991 | 499 | assert( foundptr == 1 || tail == ptr ); |
nkeynes@991 | 500 | } |
nkeynes@991 | 501 | |
nkeynes@1091 | 502 | /** |
nkeynes@1091 | 503 | * Sanity check that the given pointer is at least contained in one of the cache |
nkeynes@1091 | 504 | * regions, and has a sane-ish size. We don't do a full region walk atm. |
nkeynes@1091 | 505 | */ |
nkeynes@1091 | 506 | gboolean xlat_is_code_pointer( void *p ) |
nkeynes@1091 | 507 | { |
nkeynes@1091 | 508 | char *region; |
nkeynes@1091 | 509 | uintptr_t region_size; |
nkeynes@1091 | 510 | |
nkeynes@1091 | 511 | xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p); |
nkeynes@1091 | 512 | if( (((char *)block) - (char *)xlat_new_cache) < XLAT_NEW_CACHE_SIZE ) { |
nkeynes@1091 | 513 | /* Pointer is in new cache */ |
nkeynes@1091 | 514 | region = (char *)xlat_new_cache; |
nkeynes@1091 | 515 | region_size = XLAT_NEW_CACHE_SIZE; |
nkeynes@1091 | 516 | } |
nkeynes@1091 | 517 | #ifdef XLAT_GENERATIONAL_CACHE |
nkeynes@1091 | 518 | else if( (((char *)block) - (char *)xlat_temp_cache) < XLAT_TEMP_CACHE_SIZE ) { |
nkeynes@1091 | 519 | /* Pointer is in temp cache */ |
nkeynes@1091 | 520 | region = (char *)xlat_temp_cache; |
nkeynes@1091 | 521 | region_size = XLAT_TEMP_CACHE_SIZE; |
nkeynes@1091 | 522 | } else if( (((char *)block) - (char *)xlat_old_cache) < XLAT_OLD_CACHE_SIZE ) { |
nkeynes@1091 | 523 | /* Pointer is in old cache */ |
nkeynes@1091 | 524 | region = (char *)xlat_old_cache; |
nkeynes@1091 | 525 | region_size = XLAT_OLD_CACHE_SIZE; |
nkeynes@1091 | 526 | } |
nkeynes@1091 | 527 | #endif |
nkeynes@1091 | 528 | else { |
nkeynes@1091 | 529 | /* Not a valid cache pointer */ |
nkeynes@1091 | 530 | return FALSE; |
nkeynes@1091 | 531 | } |
nkeynes@1091 | 532 | |
nkeynes@1091 | 533 | /* Make sure the whole block is in the region */ |
nkeynes@1091 | 534 | if( (((char *)p) - region) >= region_size || |
nkeynes@1091 | 535 | (((char *)(NEXT(block))) - region) >= region_size ) |
nkeynes@1091 | 536 | return FALSE; |
nkeynes@1091 | 537 | return TRUE; |
nkeynes@1091 | 538 | } |
nkeynes@1091 | 539 | |
nkeynes@991 | 540 | void xlat_check_integrity( ) |
nkeynes@991 | 541 | { |
nkeynes@991 | 542 | xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE ); |
nkeynes@991 | 543 | #ifdef XLAT_GENERATIONAL_CACHE |
nkeynes@991 | 544 | xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE ); |
nkeynes@991 | 545 | xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE ); |
nkeynes@991 | 546 | #endif |
nkeynes@991 | 547 | } |
nkeynes@991 | 548 |