nkeynes@359: /** nkeynes@586: * $Id$ nkeynes@359: * nkeynes@359: * Translation cache management. This part is architecture independent. nkeynes@359: * nkeynes@359: * Copyright (c) 2005 Nathan Keynes. nkeynes@359: * nkeynes@359: * This program is free software; you can redistribute it and/or modify nkeynes@359: * it under the terms of the GNU General Public License as published by nkeynes@359: * the Free Software Foundation; either version 2 of the License, or nkeynes@359: * (at your option) any later version. nkeynes@359: * nkeynes@359: * This program is distributed in the hope that it will be useful, nkeynes@359: * but WITHOUT ANY WARRANTY; without even the implied warranty of nkeynes@359: * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the nkeynes@359: * GNU General Public License for more details. nkeynes@359: */ nkeynes@736: nkeynes@488: #include nkeynes@359: #include nkeynes@359: #include nkeynes@359: nkeynes@428: #include "dreamcast.h" nkeynes@586: #include "sh4/sh4core.h" nkeynes@428: #include "sh4/xltcache.h" nkeynes@428: #include "x86dasm/x86dasm.h" nkeynes@428: nkeynes@359: #define XLAT_LUT_PAGE_BITS 12 nkeynes@359: #define XLAT_LUT_TOTAL_BITS 28 nkeynes@359: #define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF) nkeynes@359: #define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1) nkeynes@359: nkeynes@359: #define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS)) nkeynes@359: #define XLAT_LUT_PAGE_ENTRIES (1<code[(block)->size])) nkeynes@359: #define BLOCK_FOR_CODE(code) (((xlat_cache_block_t)code)-1) nkeynes@359: #define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED) nkeynes@359: #define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY) nkeynes@359: nkeynes@359: #define MIN_BLOCK_SIZE 32 nkeynes@359: #define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE) nkeynes@359: nkeynes@359: #define BLOCK_INACTIVE 0 nkeynes@359: #define BLOCK_ACTIVE 1 nkeynes@359: #define BLOCK_USED 2 nkeynes@359: nkeynes@359: xlat_cache_block_t xlat_new_cache; nkeynes@359: xlat_cache_block_t xlat_new_cache_ptr; nkeynes@359: xlat_cache_block_t xlat_new_create_ptr; nkeynes@359: xlat_cache_block_t xlat_temp_cache; nkeynes@359: xlat_cache_block_t xlat_temp_cache_ptr; nkeynes@359: xlat_cache_block_t xlat_old_cache; nkeynes@359: xlat_cache_block_t xlat_old_cache_ptr; nkeynes@359: static void ***xlat_lut; nkeynes@376: static gboolean xlat_initialized = FALSE; nkeynes@359: nkeynes@422: void xlat_cache_init(void) nkeynes@359: { nkeynes@376: if( !xlat_initialized ) { nkeynes@736: xlat_initialized = TRUE; nkeynes@736: xlat_new_cache = mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE, nkeynes@736: MAP_PRIVATE|MAP_ANON, -1, 0 ); nkeynes@736: xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE, nkeynes@736: MAP_PRIVATE|MAP_ANON, -1, 0 ); nkeynes@736: xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE, nkeynes@736: MAP_PRIVATE|MAP_ANON, -1, 0 ); nkeynes@736: xlat_new_cache_ptr = xlat_new_cache; nkeynes@736: xlat_temp_cache_ptr = xlat_temp_cache; nkeynes@736: xlat_old_cache_ptr = xlat_old_cache; nkeynes@736: xlat_new_create_ptr = xlat_new_cache; nkeynes@736: nkeynes@736: xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE, nkeynes@736: MAP_PRIVATE|MAP_ANON, -1, 0); nkeynes@736: memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) ); nkeynes@376: } nkeynes@359: xlat_flush_cache(); nkeynes@359: } nkeynes@359: nkeynes@400: void xlat_print_free( FILE *out ) nkeynes@400: { nkeynes@400: fprintf( out, "New space: %d\nTemp space: %d\nOld space: %d\n", nkeynes@736: xlat_new_cache_ptr->size, xlat_temp_cache_ptr->size, xlat_old_cache_ptr->size ); nkeynes@400: } nkeynes@400: nkeynes@359: /** nkeynes@359: * Reset the cache structure to its default state nkeynes@359: */ nkeynes@359: void xlat_flush_cache() nkeynes@359: { nkeynes@359: xlat_cache_block_t tmp; nkeynes@359: int i; nkeynes@359: xlat_new_cache_ptr = xlat_new_cache; nkeynes@359: xlat_new_cache_ptr->active = 0; nkeynes@359: xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block); nkeynes@359: tmp = NEXT(xlat_new_cache_ptr); nkeynes@359: tmp->active = 1; nkeynes@359: tmp->size = 0; nkeynes@359: xlat_temp_cache_ptr = xlat_temp_cache; nkeynes@359: xlat_temp_cache_ptr->active = 0; nkeynes@359: xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block); nkeynes@359: tmp = NEXT(xlat_temp_cache_ptr); nkeynes@359: tmp->active = 1; nkeynes@359: tmp->size = 0; nkeynes@359: xlat_old_cache_ptr = xlat_old_cache; nkeynes@359: xlat_old_cache_ptr->active = 0; nkeynes@359: xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block); nkeynes@359: tmp = NEXT(xlat_old_cache_ptr); nkeynes@359: tmp->active = 1; nkeynes@359: tmp->size = 0; nkeynes@359: for( i=0; iactive = 0; nkeynes@736: } nkeynes@736: page[i] = NULL; nkeynes@359: } nkeynes@359: } nkeynes@359: nkeynes@400: void xlat_invalidate_word( sh4addr_t addr ) nkeynes@400: { nkeynes@400: if( xlat_lut ) { nkeynes@736: void **page = xlat_lut[XLAT_LUT_PAGE(addr)]; nkeynes@736: if( page != NULL ) { nkeynes@736: int entry = XLAT_LUT_ENTRY(addr); nkeynes@736: if( page[entry] != NULL ) { nkeynes@736: xlat_flush_page_by_lut(page); nkeynes@736: } nkeynes@736: } nkeynes@400: } nkeynes@400: } nkeynes@400: nkeynes@400: void xlat_invalidate_long( sh4addr_t addr ) nkeynes@400: { nkeynes@400: if( xlat_lut ) { nkeynes@736: void **page = xlat_lut[XLAT_LUT_PAGE(addr)]; nkeynes@736: if( page != NULL ) { nkeynes@736: int entry = XLAT_LUT_ENTRY(addr); nkeynes@736: if( page[entry] != NULL || page[entry+1] != NULL ) { nkeynes@736: xlat_flush_page_by_lut(page); nkeynes@736: } nkeynes@736: } nkeynes@400: } nkeynes@400: } nkeynes@400: nkeynes@400: void xlat_invalidate_block( sh4addr_t address, size_t size ) nkeynes@400: { nkeynes@400: int i; nkeynes@400: int entry_count = size >> 1; // words; nkeynes@400: uint32_t page_no = XLAT_LUT_PAGE(address); nkeynes@400: int entry = XLAT_LUT_ENTRY(address); nkeynes@400: if( xlat_lut ) { nkeynes@736: do { nkeynes@736: void **page = xlat_lut[page_no]; nkeynes@736: int page_entries = XLAT_LUT_PAGE_ENTRIES - entry; nkeynes@736: if( entry_count < page_entries ) { nkeynes@736: page_entries = entry_count; nkeynes@736: } nkeynes@736: if( page != NULL ) { nkeynes@736: if( page_entries == XLAT_LUT_PAGE_ENTRIES ) { nkeynes@736: /* Overwriting the entire page anyway */ nkeynes@736: xlat_flush_page_by_lut(page); nkeynes@736: } else { nkeynes@736: for( i=entry; i 0 ); nkeynes@400: } nkeynes@400: } nkeynes@400: nkeynes@400: void xlat_flush_page( sh4addr_t address ) nkeynes@400: { nkeynes@400: void **page = xlat_lut[XLAT_LUT_PAGE(address)]; nkeynes@400: if( page != NULL ) { nkeynes@736: xlat_flush_page_by_lut(page); nkeynes@400: } nkeynes@400: } nkeynes@400: nkeynes@359: void *xlat_get_code( sh4addr_t address ) nkeynes@359: { nkeynes@410: void *result = NULL; nkeynes@359: void **page = xlat_lut[XLAT_LUT_PAGE(address)]; nkeynes@407: if( page != NULL ) { nkeynes@736: result = (void *)(((uintptr_t)(page[XLAT_LUT_ENTRY(address)])) & (~((uintptr_t)0x03))); nkeynes@407: } nkeynes@407: return result; nkeynes@407: } nkeynes@407: nkeynes@586: xlat_recovery_record_t xlat_get_recovery( void *code, void *native_pc, gboolean recover_after ) nkeynes@586: { nkeynes@586: if( code != NULL ) { nkeynes@736: uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code); nkeynes@736: xlat_cache_block_t block = BLOCK_FOR_CODE(code); nkeynes@736: uint32_t count = block->recover_table_size; nkeynes@736: xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]); nkeynes@736: uint32_t posn; nkeynes@736: if( recover_after ) { nkeynes@736: if( records[count-1].xlat_offset < pc_offset ) { nkeynes@736: return NULL; nkeynes@736: } nkeynes@736: for( posn=count-1; posn > 0; posn-- ) { nkeynes@736: if( records[posn-1].xlat_offset < pc_offset ) { nkeynes@736: return &records[posn]; nkeynes@736: } nkeynes@736: } nkeynes@736: return &records[0]; // shouldn't happen nkeynes@736: } else { nkeynes@736: for( posn = 1; posn < count; posn++ ) { nkeynes@736: if( records[posn].xlat_offset >= pc_offset ) { nkeynes@736: return &records[posn-1]; nkeynes@736: } nkeynes@736: } nkeynes@736: return &records[count-1]; nkeynes@736: } nkeynes@586: } nkeynes@586: return NULL; nkeynes@586: } nkeynes@586: nkeynes@407: void **xlat_get_lut_entry( sh4addr_t address ) nkeynes@407: { nkeynes@407: void **page = xlat_lut[XLAT_LUT_PAGE(address)]; nkeynes@407: nkeynes@407: /* Add the LUT entry for the block */ nkeynes@359: if( page == NULL ) { nkeynes@736: xlat_lut[XLAT_LUT_PAGE(address)] = page = nkeynes@736: mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE, nkeynes@736: MAP_PRIVATE|MAP_ANON, -1, 0 ); nkeynes@736: memset( page, 0, XLAT_LUT_PAGE_SIZE ); nkeynes@359: } nkeynes@736: nkeynes@407: return &page[XLAT_LUT_ENTRY(address)]; nkeynes@359: } nkeynes@359: nkeynes@407: nkeynes@407: nkeynes@366: uint32_t xlat_get_block_size( void *block ) nkeynes@366: { nkeynes@366: xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block)); nkeynes@366: return xlt->size; nkeynes@366: } nkeynes@366: nkeynes@586: uint32_t xlat_get_code_size( void *block ) nkeynes@586: { nkeynes@586: xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block)); nkeynes@592: if( xlt->recover_table_offset == 0 ) { nkeynes@736: return xlt->size; nkeynes@586: } else { nkeynes@736: return xlt->recover_table_offset; nkeynes@586: } nkeynes@586: } nkeynes@586: nkeynes@359: /** nkeynes@359: * Cut the specified block so that it has the given size, with the remaining data nkeynes@359: * forming a new free block. If the free block would be less than the minimum size, nkeynes@359: * the cut is not performed. nkeynes@359: * @return the next block after the (possibly cut) block. nkeynes@359: */ nkeynes@359: static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize ) nkeynes@359: { nkeynes@407: cutsize = (cutsize + 3) & 0xFFFFFFFC; // force word alignment nkeynes@410: assert( cutsize <= block->size ); nkeynes@359: if( block->size > cutsize + MIN_TOTAL_SIZE ) { nkeynes@736: int oldsize = block->size; nkeynes@736: block->size = cutsize; nkeynes@736: xlat_cache_block_t next = NEXT(block); nkeynes@736: next->active = 0; nkeynes@736: next->size = oldsize - cutsize - sizeof(struct xlat_cache_block); nkeynes@736: return next; nkeynes@359: } else { nkeynes@736: return NEXT(block); nkeynes@359: } nkeynes@359: } nkeynes@359: nkeynes@359: /** nkeynes@359: * Promote a block in temp space (or elsewhere for that matter) to old space. nkeynes@359: * nkeynes@359: * @param block to promote. nkeynes@359: */ nkeynes@359: static void xlat_promote_to_old_space( xlat_cache_block_t block ) nkeynes@359: { nkeynes@788: int allocation = (int)-sizeof(struct xlat_cache_block); nkeynes@359: int size = block->size; nkeynes@359: xlat_cache_block_t curr = xlat_old_cache_ptr; nkeynes@359: xlat_cache_block_t start_block = curr; nkeynes@359: do { nkeynes@736: allocation += curr->size + sizeof(struct xlat_cache_block); nkeynes@736: curr = NEXT(curr); nkeynes@736: if( allocation > size ) { nkeynes@736: break; /* done */ nkeynes@736: } nkeynes@736: if( curr->size == 0 ) { /* End-of-cache Sentinel */ nkeynes@736: /* Leave what we just released as free space and start again from the nkeynes@736: * top of the cache nkeynes@736: */ nkeynes@736: start_block->active = 0; nkeynes@736: start_block->size = allocation; nkeynes@788: allocation = (int)-sizeof(struct xlat_cache_block); nkeynes@736: start_block = curr = xlat_old_cache; nkeynes@736: } nkeynes@359: } while(1); nkeynes@359: start_block->active = 1; nkeynes@359: start_block->size = allocation; nkeynes@359: start_block->lut_entry = block->lut_entry; nkeynes@596: start_block->recover_table_offset = block->recover_table_offset; nkeynes@596: start_block->recover_table_size = block->recover_table_size; nkeynes@359: *block->lut_entry = &start_block->code; nkeynes@359: memcpy( start_block->code, block->code, block->size ); nkeynes@359: xlat_old_cache_ptr = xlat_cut_block(start_block, size ); nkeynes@359: if( xlat_old_cache_ptr->size == 0 ) { nkeynes@736: xlat_old_cache_ptr = xlat_old_cache; nkeynes@359: } nkeynes@359: } nkeynes@359: nkeynes@359: /** nkeynes@359: * Similarly to the above method, promotes a block to temp space. nkeynes@359: * TODO: Try to combine these - they're nearly identical nkeynes@359: */ nkeynes@359: void xlat_promote_to_temp_space( xlat_cache_block_t block ) nkeynes@359: { nkeynes@359: int size = block->size; nkeynes@788: int allocation = (int)-sizeof(struct xlat_cache_block); nkeynes@359: xlat_cache_block_t curr = xlat_temp_cache_ptr; nkeynes@359: xlat_cache_block_t start_block = curr; nkeynes@359: do { nkeynes@736: if( curr->active == BLOCK_USED ) { nkeynes@736: xlat_promote_to_old_space( curr ); nkeynes@736: } else if( curr->active == BLOCK_ACTIVE ) { nkeynes@736: // Active but not used, release block nkeynes@736: *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03); nkeynes@736: } nkeynes@736: allocation += curr->size + sizeof(struct xlat_cache_block); nkeynes@736: curr = NEXT(curr); nkeynes@736: if( allocation > size ) { nkeynes@736: break; /* done */ nkeynes@736: } nkeynes@736: if( curr->size == 0 ) { /* End-of-cache Sentinel */ nkeynes@736: /* Leave what we just released as free space and start again from the nkeynes@736: * top of the cache nkeynes@736: */ nkeynes@736: start_block->active = 0; nkeynes@736: start_block->size = allocation; nkeynes@788: allocation = (int)-sizeof(struct xlat_cache_block); nkeynes@736: start_block = curr = xlat_temp_cache; nkeynes@736: } nkeynes@359: } while(1); nkeynes@359: start_block->active = 1; nkeynes@359: start_block->size = allocation; nkeynes@359: start_block->lut_entry = block->lut_entry; nkeynes@596: start_block->recover_table_offset = block->recover_table_offset; nkeynes@596: start_block->recover_table_size = block->recover_table_size; nkeynes@359: *block->lut_entry = &start_block->code; nkeynes@359: memcpy( start_block->code, block->code, block->size ); nkeynes@359: xlat_temp_cache_ptr = xlat_cut_block(start_block, size ); nkeynes@359: if( xlat_temp_cache_ptr->size == 0 ) { nkeynes@736: xlat_temp_cache_ptr = xlat_temp_cache; nkeynes@359: } nkeynes@736: nkeynes@359: } nkeynes@359: nkeynes@359: /** nkeynes@359: * Returns the next block in the new cache list that can be written to by the nkeynes@359: * translator. If the next block is active, it is evicted first. nkeynes@359: */ nkeynes@359: xlat_cache_block_t xlat_start_block( sh4addr_t address ) nkeynes@359: { nkeynes@359: if( xlat_new_cache_ptr->size == 0 ) { nkeynes@736: xlat_new_cache_ptr = xlat_new_cache; nkeynes@359: } nkeynes@359: nkeynes@359: if( xlat_new_cache_ptr->active ) { nkeynes@736: xlat_promote_to_temp_space( xlat_new_cache_ptr ); nkeynes@359: } nkeynes@359: xlat_new_create_ptr = xlat_new_cache_ptr; nkeynes@359: xlat_new_create_ptr->active = 1; nkeynes@359: xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr); nkeynes@359: nkeynes@359: /* Add the LUT entry for the block */ nkeynes@359: if( xlat_lut[XLAT_LUT_PAGE(address)] == NULL ) { nkeynes@736: xlat_lut[XLAT_LUT_PAGE(address)] = nkeynes@736: mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE, nkeynes@736: MAP_PRIVATE|MAP_ANON, -1, 0 ); nkeynes@736: memset( xlat_lut[XLAT_LUT_PAGE(address)], 0, XLAT_LUT_PAGE_SIZE ); nkeynes@359: } nkeynes@359: nkeynes@359: if( IS_ENTRY_POINT(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]) ) { nkeynes@736: xlat_cache_block_t oldblock = BLOCK_FOR_CODE(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]); nkeynes@736: oldblock->active = 0; nkeynes@359: } nkeynes@359: nkeynes@359: xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)] = nkeynes@736: &xlat_new_create_ptr->code; nkeynes@359: xlat_new_create_ptr->lut_entry = xlat_lut[XLAT_LUT_PAGE(address)] + XLAT_LUT_ENTRY(address); nkeynes@736: nkeynes@359: return xlat_new_create_ptr; nkeynes@359: } nkeynes@359: nkeynes@410: xlat_cache_block_t xlat_extend_block( uint32_t newSize ) nkeynes@359: { nkeynes@410: while( xlat_new_create_ptr->size < newSize ) { nkeynes@736: if( xlat_new_cache_ptr->size == 0 ) { nkeynes@736: /* Migrate to the front of the cache to keep it contiguous */ nkeynes@736: xlat_new_create_ptr->active = 0; nkeynes@736: sh4ptr_t olddata = xlat_new_create_ptr->code; nkeynes@736: int oldsize = xlat_new_create_ptr->size; nkeynes@736: int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */ nkeynes@736: void **lut_entry = xlat_new_create_ptr->lut_entry; nkeynes@788: int allocation = (int)-sizeof(struct xlat_cache_block); nkeynes@736: xlat_new_cache_ptr = xlat_new_cache; nkeynes@736: do { nkeynes@736: if( xlat_new_cache_ptr->active ) { nkeynes@736: xlat_promote_to_temp_space( xlat_new_cache_ptr ); nkeynes@736: } nkeynes@736: allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block); nkeynes@736: xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr); nkeynes@736: } while( allocation < size ); nkeynes@736: xlat_new_create_ptr = xlat_new_cache; nkeynes@736: xlat_new_create_ptr->active = 1; nkeynes@736: xlat_new_create_ptr->size = allocation; nkeynes@736: xlat_new_create_ptr->lut_entry = lut_entry; nkeynes@736: *lut_entry = &xlat_new_create_ptr->code; nkeynes@736: memmove( xlat_new_create_ptr->code, olddata, oldsize ); nkeynes@736: } else { nkeynes@736: if( xlat_new_cache_ptr->active ) { nkeynes@736: xlat_promote_to_temp_space( xlat_new_cache_ptr ); nkeynes@736: } nkeynes@736: xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block); nkeynes@736: xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr); nkeynes@736: } nkeynes@359: } nkeynes@359: return xlat_new_create_ptr; nkeynes@359: nkeynes@359: } nkeynes@359: nkeynes@359: void xlat_commit_block( uint32_t destsize, uint32_t srcsize ) nkeynes@359: { nkeynes@359: void **ptr = xlat_new_create_ptr->lut_entry; nkeynes@359: void **endptr = ptr + (srcsize>>2); nkeynes@359: while( ptr < endptr ) { nkeynes@736: if( *ptr == NULL ) { nkeynes@736: *ptr = XLAT_LUT_ENTRY_USED; nkeynes@736: } nkeynes@736: ptr++; nkeynes@359: } nkeynes@359: nkeynes@359: xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize ); nkeynes@359: } nkeynes@359: nkeynes@359: void xlat_delete_block( xlat_cache_block_t block ) nkeynes@359: { nkeynes@359: block->active = 0; nkeynes@359: *block->lut_entry = NULL; nkeynes@359: } nkeynes@359: nkeynes@359: void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size ) nkeynes@359: { nkeynes@359: int foundptr = 0; nkeynes@359: xlat_cache_block_t tail = nkeynes@736: (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block)); nkeynes@359: nkeynes@359: assert( tail->active == 1 ); nkeynes@359: assert( tail->size == 0 ); nkeynes@359: while( cache < tail ) { nkeynes@736: assert( cache->active >= 0 && cache->active <= 2 ); nkeynes@736: assert( cache->size >= 0 && cache->size < size ); nkeynes@736: if( cache == ptr ) { nkeynes@736: foundptr = 1; nkeynes@736: } nkeynes@736: cache = NEXT(cache); nkeynes@359: } nkeynes@359: assert( cache == tail ); nkeynes@596: assert( foundptr == 1 || tail == ptr ); nkeynes@359: } nkeynes@359: nkeynes@359: void xlat_check_integrity( ) nkeynes@359: { nkeynes@359: xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE ); nkeynes@359: xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE ); nkeynes@359: xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE ); nkeynes@359: } nkeynes@376: