/**
 * $Id$
 *
 * Translation cache management. This part is architecture independent.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>
#include <assert.h>

#include "dreamcast.h"
#include "sh4/sh4core.h"
#include "xlat/xltcache.h"
#include "x86dasm/x86dasm.h"

#define XLAT_LUT_PAGE_BITS 12
#define XLAT_LUT_TOTAL_BITS 28
#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)

#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))

#define XLAT_LUT_ENTRY_EMPTY (void *)0
#define XLAT_LUT_ENTRY_USED  (void *)1

#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
#define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
#define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)

#define MIN_BLOCK_SIZE 32
#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)

#define BLOCK_INACTIVE 0
#define BLOCK_ACTIVE 1
#define BLOCK_USED 2

xlat_cache_block_t xlat_new_cache;
xlat_cache_block_t xlat_new_cache_ptr;
xlat_cache_block_t xlat_new_create_ptr;

#ifdef XLAT_GENERATIONAL_CACHE
xlat_cache_block_t xlat_temp_cache;
xlat_cache_block_t xlat_temp_cache_ptr;
xlat_cache_block_t xlat_old_cache;
xlat_cache_block_t xlat_old_cache_ptr;
#endif

static void **xlat_lut[XLAT_LUT_PAGES];
static gboolean xlat_initialized = FALSE;

void xlat_cache_init(void)
{
    if( !xlat_initialized ) {
        xlat_initialized = TRUE;
        xlat_new_cache = (xlat_cache_block_t)mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_new_cache_ptr = xlat_new_cache;
        xlat_new_create_ptr = xlat_new_cache;
#ifdef XLAT_GENERATIONAL_CACHE
        xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_temp_cache_ptr = xlat_temp_cache;
        xlat_old_cache_ptr = xlat_old_cache;
#endif
//        xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
//                MAP_PRIVATE|MAP_ANON, -1, 0);
        memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
    }
    xlat_flush_cache();
}
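
/*
 * Worked example of the LUT decomposition above (illustrative note, not used
 * by the code): the lookup table is two-level. XLAT_LUT_PAGE() selects one of
 * XLAT_LUT_PAGES 8KB regions of target address space, and XLAT_LUT_ENTRY()
 * selects the per-instruction slot within that region (SH4 instructions are
 * 2 bytes wide, hence the >>1). For example, for address 0x8C0010A0:
 *
 *     XLAT_LUT_PAGE(0x8C0010A0)  == (0x8C0010A0>>13) & 0xFFFF  == 0x6000
 *     XLAT_LUT_ENTRY(0x8C0010A0) == (0x8C0010A0&0x1FFE) >> 1   == 0x0850
 *
 * so the translated code pointer (if any) lives at xlat_lut[0x6000][0x850].
 */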

/**
 * Reset the cache structure to its default state
 */
void xlat_flush_cache()
{
    xlat_cache_block_t tmp;
    int i;
    xlat_new_cache_ptr = xlat_new_cache;
    xlat_new_cache_ptr->active = 0;
    xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_new_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_temp_cache_ptr->active = 0;
    xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_temp_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_old_cache_ptr->active = 0;
    xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_old_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#endif
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        if( xlat_lut[i] != NULL ) {
            memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
        }
    }
}

void xlat_flush_page_by_lut( void **page )
{
    int i;
    for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
        if( IS_ENTRY_POINT(page[i]) ) {
            void *p = page[i];
            do {
                xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
                block->active = 0;
                p = block->chain;
            } while( p != NULL );
        }
        page[i] = NULL;
    }
}

void FASTCALL xlat_invalidate_word( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( page[entry] != NULL ) {
            xlat_flush_page_by_lut(page);
        }
    }
}

void FASTCALL xlat_invalidate_long( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( *(uint64_t *)&page[entry] != 0 ) {
            xlat_flush_page_by_lut(page);
        }
    }
}

void FASTCALL xlat_invalidate_block( sh4addr_t address, size_t size )
{
    int i;
    int entry_count = size >> 1; // words
    uint32_t page_no = XLAT_LUT_PAGE(address);
    int entry = XLAT_LUT_ENTRY(address);
    do {
        void **page = xlat_lut[page_no];
        int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
        if( entry_count < page_entries ) {
            page_entries = entry_count;
        }
        if( page != NULL ) {
            if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
                /* Overwriting the entire page anyway */
                xlat_flush_page_by_lut(page);
            } else {
                for( i=entry; i<entry+page_entries; i++ ) {
                    if( page[i] != NULL ) {
                        xlat_flush_page_by_lut(page);
                        break;
                    }
                }
            }
        }
        page_no ++;
        entry_count -= page_entries;
        entry = 0;
    } while( entry_count > 0 );
}

void FASTCALL xlat_flush_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        xlat_flush_page_by_lut(page);
    }
}

void * FASTCALL xlat_get_code( sh4addr_t address )
{
    void *result = NULL;
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        result = (void *)(((uintptr_t)(page[XLAT_LUT_ENTRY(address)])) & (~((uintptr_t)0x03)));
    }
    return result;
}
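
/*
 * Note on the LUT entry encoding (summarising the behaviour above): an entry
 * is either XLAT_LUT_ENTRY_EMPTY (never translated), XLAT_LUT_ENTRY_USED
 * (covered by a translated block but not an entry point), or a pointer to the
 * start of the block's code. xlat_get_code() masks off the low two bits,
 * which appear to be reserved for flag use, before returning the pointer.
 *
 * Minimal usage sketch, assuming a hypothetical 32-bit store handler (the
 * name example_write_long and the elided store are placeholders, not part of
 * this file): translated code covering the written address must be
 * invalidated before the store changes the underlying SH4 memory.
 */
static inline void example_write_long( sh4addr_t addr, uint32_t val )
{
    xlat_invalidate_long( addr );   /* flush translations touching the 4 bytes at addr */
    /* ...the real handler would perform the store to SH4 memory here... */
    (void)val;
}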

xlat_recovery_record_t xlat_get_pre_recovery( void *code, void *native_pc )
{
    if( code != NULL ) {
        uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
        xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code);
        uint32_t count = block->recover_table_size;
        xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
        uint32_t posn;
        for( posn = 1; posn < count; posn++ ) {
            if( records[posn].xlat_offset >= pc_offset ) {
                return &records[posn-1];
            }
        }
        return &records[count-1];
    }
    return NULL;
}

void ** FASTCALL xlat_get_lut_entry( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];

    /* Add the LUT entry for the block */
    if( page == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] = page =
            (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( page, 0, XLAT_LUT_PAGE_SIZE );
    }

    return &page[XLAT_LUT_ENTRY(address)];
}

uint32_t FASTCALL xlat_get_block_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    return xlt->size;
}

uint32_t FASTCALL xlat_get_code_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    if( xlt->recover_table_offset == 0 ) {
        return xlt->size;
    } else {
        return xlt->recover_table_offset;
    }
}

/**
 * Cut the specified block so that it has the given size, with the remaining data
 * forming a new free block. If the free block would be less than the minimum size,
 * the cut is not performed.
 * @return the next block after the (possibly cut) block.
 */
static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
{
    cutsize = (cutsize + 3) & 0xFFFFFFFC; // force word alignment
    assert( cutsize <= block->size );
    if( block->size > cutsize + MIN_TOTAL_SIZE ) {
        int oldsize = block->size;
        block->size = cutsize;
        xlat_cache_block_t next = NEXT(block);
        next->active = 0;
        next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
        return next;
    } else {
        return NEXT(block);
    }
}
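
/*
 * Worked example for xlat_cut_block() (illustrative figures only): cutting a
 * 4096-byte block down to 100 bytes of code first rounds the request up to
 * 4-byte alignment (100 is already aligned), shrinks the block to 100 bytes,
 * and turns the remaining 4096 - 100 - sizeof(struct xlat_cache_block) bytes
 * into a new inactive free block immediately after it. If the leftover space
 * would be smaller than MIN_TOTAL_SIZE, the block simply keeps its original
 * size and the following block is returned unchanged.
 */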

#ifdef XLAT_GENERATIONAL_CACHE
/**
 * Promote a block in temp space (or elsewhere for that matter) to old space.
 *
 * @param block to promote.
 */
static void xlat_promote_to_old_space( xlat_cache_block_t block )
{
    int allocation = (int)-sizeof(struct xlat_cache_block);
    int size = block->size;
    xlat_cache_block_t curr = xlat_old_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_old_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_old_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_old_cache_ptr->size == 0 ) {
        xlat_old_cache_ptr = xlat_old_cache;
    }
}
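
/*
 * Note on the promotion loop above: `allocation` starts at minus one header so
 * that, after folding in each swallowed block's size plus its header, it ends
 * up counting the usable code bytes of the coalesced region (exactly one
 * header survives for the merged block). The loop therefore consumes
 * consecutive blocks until at least `size` bytes of code space are available,
 * or wraps back to the top of the cache when it hits the zero-size sentinel.
 */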

/**
 * Similar to the above, promotes a block to temp space.
 * TODO: Try to combine these - they're nearly identical
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    int size = block->size;
    int allocation = (int)-sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = xlat_temp_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        if( curr->active == BLOCK_USED ) {
            xlat_promote_to_old_space( curr );
        } else if( curr->active == BLOCK_ACTIVE ) {
            // Active but not used, release block
            *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03);
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_temp_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_temp_cache_ptr->size == 0 ) {
        xlat_temp_cache_ptr = xlat_temp_cache;
    }
}
#else
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    *block->lut_entry = 0;
}
#endif
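
/*
 * Without XLAT_GENERATIONAL_CACHE, "promotion" degenerates to unlinking the
 * block from the lookup table: the block's code is left in place but can no
 * longer be looked up, and its space is reclaimed the next time the new-cache
 * allocator sweeps over it.
 */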

/**
 * Returns the next block in the new cache list that can be written to by the
 * translator. If the next block is active, it is evicted first.
 */
xlat_cache_block_t xlat_start_block( sh4addr_t address )
{
    if( xlat_new_cache_ptr->size == 0 ) {
        xlat_new_cache_ptr = xlat_new_cache;
    }

    if( xlat_new_cache_ptr->active ) {
        xlat_promote_to_temp_space( xlat_new_cache_ptr );
    }
    xlat_new_create_ptr = xlat_new_cache_ptr;
    xlat_new_create_ptr->active = 1;
    xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);

    /* Add the LUT entry for the block */
    if( xlat_lut[XLAT_LUT_PAGE(address)] == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] =
            (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( xlat_lut[XLAT_LUT_PAGE(address)], 0, XLAT_LUT_PAGE_SIZE );
    }

    if( IS_ENTRY_POINT(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]) ) {
        void *p = xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)];
        xlat_cache_block_t oldblock = XLAT_BLOCK_FOR_CODE(p);
        assert( oldblock->active );
        xlat_new_create_ptr->chain = p;
    } else {
        xlat_new_create_ptr->chain = NULL;
    }

    xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)] =
        &xlat_new_create_ptr->code;
    xlat_new_create_ptr->lut_entry = xlat_lut[XLAT_LUT_PAGE(address)] + XLAT_LUT_ENTRY(address);

    return xlat_new_create_ptr;
}

xlat_cache_block_t xlat_extend_block( uint32_t newSize )
{
    while( xlat_new_create_ptr->size < newSize ) {
        if( xlat_new_cache_ptr->size == 0 ) {
            /* Migrate to the front of the cache to keep it contiguous */
            xlat_new_create_ptr->active = 0;
            sh4ptr_t olddata = xlat_new_create_ptr->code;
            int oldsize = xlat_new_create_ptr->size;
            int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
            void **lut_entry = xlat_new_create_ptr->lut_entry;
            void *chain = xlat_new_create_ptr->chain;
            int allocation = (int)-sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = xlat_new_cache;
            do {
                if( xlat_new_cache_ptr->active ) {
                    xlat_promote_to_temp_space( xlat_new_cache_ptr );
                }
                allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
                xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
            } while( allocation < size );
            xlat_new_create_ptr = xlat_new_cache;
            xlat_new_create_ptr->active = 1;
            xlat_new_create_ptr->size = allocation;
            xlat_new_create_ptr->lut_entry = lut_entry;
            xlat_new_create_ptr->chain = chain;
            *lut_entry = &xlat_new_create_ptr->code;
            memmove( xlat_new_create_ptr->code, olddata, oldsize );
        } else {
            if( xlat_new_cache_ptr->active ) {
                xlat_promote_to_temp_space( xlat_new_cache_ptr );
            }
            xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
        }
    }
    return xlat_new_create_ptr;
}
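
/*
 * Note on the layout conventions used above: each cache region ends with a
 * sentinel block whose size is 0 and whose active flag is set (established in
 * xlat_flush_cache()), so an allocator walking forward with NEXT() detects the
 * end of the region by size == 0 and wraps back to the start. The chain field
 * links a freshly started block to any older block that previously occupied
 * the same entry point, which is what lets xlat_flush_page_by_lut()
 * deactivate every generation of code for an address in one pass.
 */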

void xlat_commit_block( uint32_t destsize, uint32_t srcsize )
{
    void **ptr = xlat_new_create_ptr->lut_entry;
    void **endptr = ptr + (srcsize>>1);
    while( ptr < endptr ) {
        if( *ptr == NULL ) {
            *ptr = XLAT_LUT_ENTRY_USED;
        }
        ptr++;
    }

    xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
}

void xlat_delete_block( xlat_cache_block_t block )
{
    block->active = 0;
    *block->lut_entry = NULL;
}

void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
{
    int foundptr = 0;
    xlat_cache_block_t tail =
        (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));

    assert( tail->active == 1 );
    assert( tail->size == 0 );
    while( cache < tail ) {
        assert( cache->active >= 0 && cache->active <= 2 );
        assert( cache->size >= 0 && cache->size < size );
        if( cache == ptr ) {
            foundptr = 1;
        }
        cache = NEXT(cache);
    }
    assert( cache == tail );
    assert( foundptr == 1 || tail == ptr );
}

/**
 * Perform a reverse lookup to determine the SH4 address corresponding to
 * the start of the code block containing ptr. This is _slow_ - it does a
 * linear scan of the lookup table to find this.
 *
 * If the pointer cannot be found in any live block, returns -1 (as this
 * is not a legal PC)
 */
sh4addr_t xlat_get_address( unsigned char *ptr )
{
    int i,j;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( j=0; j<XLAT_LUT_PAGE_ENTRIES; j++ ) {
                void *entry = page[j];
                if( entry > XLAT_LUT_ENTRY_USED ) {
                    xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(entry);
                    if( ptr >= block->code && ptr < block->code + block->size ) {
                        /* Found it */
                        return (i<<13) | (j<<1);
                    }
                }
            }
        }
    }
    return -1;
}
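
/*
 * Illustrative sketch of the allocation cycle a translator front-end is
 * expected to follow (example_install_block is a hypothetical helper, and the
 * pre-generated native code buffer is assumed purely for the example): claim a
 * block with xlat_start_block(), grow it with xlat_extend_block() if the
 * emitted code outgrows the space handed out, then trim it and publish the
 * LUT entries with xlat_commit_block().
 */
static inline void *example_install_block( sh4addr_t pc, const unsigned char *native_code,
                                           uint32_t native_size, uint32_t sh4_bytes )
{
    xlat_cache_block_t block = xlat_start_block( pc );
    if( block->size < native_size ) {
        block = xlat_extend_block( native_size );   /* may move and re-link the block */
    }
    memcpy( block->code, native_code, native_size );
    xlat_commit_block( native_size, sh4_bytes );    /* destsize = native bytes, srcsize = SH4 bytes */
    return block->code;
}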

/**
 * Sanity check that the given pointer is at least contained in one of the cache
 * regions, and has a sane-ish size. We don't do a full region walk at the moment.
 */
gboolean xlat_is_code_pointer( void *p )
{
    char *region;
    uintptr_t region_size;

    xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
    if( (((char *)block) - (char *)xlat_new_cache) < XLAT_NEW_CACHE_SIZE ) {
        /* Pointer is in new cache */
        region = (char *)xlat_new_cache;
        region_size = XLAT_NEW_CACHE_SIZE;
    }
#ifdef XLAT_GENERATIONAL_CACHE
    else if( (((char *)block) - (char *)xlat_temp_cache) < XLAT_TEMP_CACHE_SIZE ) {
        /* Pointer is in temp cache */
        region = (char *)xlat_temp_cache;
        region_size = XLAT_TEMP_CACHE_SIZE;
    } else if( (((char *)block) - (char *)xlat_old_cache) < XLAT_OLD_CACHE_SIZE ) {
        /* Pointer is in old cache */
        region = (char *)xlat_old_cache;
        region_size = XLAT_OLD_CACHE_SIZE;
    }
#endif
    else {
        /* Not a valid cache pointer */
        return FALSE;
    }

    /* Make sure the whole block is in the region */
    if( (((char *)p) - region) >= region_size ||
        (((char *)(NEXT(block))) - region) >= region_size )
        return FALSE;
    return TRUE;
}

void xlat_check_integrity( )
{
    xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
#endif
}