lxdream.org :: lxdream/src/xlat/xltcache.c
filename      src/xlat/xltcache.c
changeset     1182:b38a327ad8fa
prev          1175:712c418cad83
next          1186:2dc47c67bb93
author        nkeynes
date          Sun Nov 27 18:20:21 2011 +1000 (10 years ago)
permissions   -rw-r--r--
last change   Add block profiling option to count the number of executions of
              each block, and dump them out from most-to-least used.
/**
 * $Id$
 *
 * Translation cache management. This part is architecture independent.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <assert.h>
#include <string.h> /* memset, memcpy, memmove (may also arrive via dreamcast.h) */
#include <stdio.h>  /* fprintf */
#include <stdlib.h> /* qsort */

#include "dreamcast.h"
#include "sh4/sh4core.h"
#include "xlat/xltcache.h"
#include "x86dasm/x86dasm.h"
#define XLAT_LUT_PAGE_BITS 12
#define XLAT_LUT_TOTAL_BITS 28
#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)

#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))

#define XLAT_LUT_ENTRY_EMPTY (void *)0
#define XLAT_LUT_ENTRY_USED  (void *)1

#define XLAT_ADDR_FROM_ENTRY(pagenum,entrynum) ((((pagenum)&0xFFFF)<<13)|(((entrynum)<<1)&0x1FFE))
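/* Editor's note -- worked example (not part of the original source): each LUT
 * entry covers one 16-bit SH4 instruction word, so a page of 1<<12 entries
 * spans 8KB, i.e. 13 address bits. For addr = 0x8C0100A6:
 *
 *     XLAT_LUT_PAGE(addr)  = (0x8C0100A6 >> 13) & 0xFFFF = 0x6008
 *     XLAT_LUT_ENTRY(addr) = (0x8C0100A6 & 0x1FFE) >> 1  = 0x53
 *
 * XLAT_ADDR_FROM_ENTRY(0x6008, 0x53) then reconstructs 0x0C0100A6 -- only the
 * low 29 bits survive the round trip, which matches what xlat_get_address
 * below can recover.
 */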
#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
#define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
#define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)

#define MIN_BLOCK_SIZE 32
#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)

/* States for the block 'active' field; the profiling dump at the bottom of
 * this file also reads this field as an execution count */
#define BLOCK_INACTIVE 0
#define BLOCK_ACTIVE 1
#define BLOCK_USED 2
xlat_cache_block_t xlat_new_cache;
xlat_cache_block_t xlat_new_cache_ptr;
xlat_cache_block_t xlat_new_create_ptr;

#ifdef XLAT_GENERATIONAL_CACHE
xlat_cache_block_t xlat_temp_cache;
xlat_cache_block_t xlat_temp_cache_ptr;
xlat_cache_block_t xlat_old_cache;
xlat_cache_block_t xlat_old_cache_ptr;
#endif

static void **xlat_lut[XLAT_LUT_PAGES];
static gboolean xlat_initialized = FALSE;
void xlat_cache_init(void)
{
    if( !xlat_initialized ) {
        xlat_initialized = TRUE;
        xlat_new_cache = (xlat_cache_block_t)mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_new_cache_ptr = xlat_new_cache;
        xlat_new_create_ptr = xlat_new_cache;
#ifdef XLAT_GENERATIONAL_CACHE
        xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_temp_cache_ptr = xlat_temp_cache;
        xlat_old_cache_ptr = xlat_old_cache;
#endif
//        xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
//                MAP_PRIVATE|MAP_ANON, -1, 0);
        memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
    }
    xlat_flush_cache();
}
/**
 * Reset the cache structure to its default state
 */
void xlat_flush_cache()
{
    xlat_cache_block_t tmp;
    int i;
    xlat_new_cache_ptr = xlat_new_cache;
    xlat_new_cache_ptr->active = 0;
    xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_new_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_temp_cache_ptr->active = 0;
    xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_temp_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_old_cache_ptr->active = 0;
    xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_old_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#endif
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        if( xlat_lut[i] != NULL ) {
            memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
        }
    }
}
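/* Editor's note -- illustrative layout after a flush (not in the original
 * source). Each region becomes one big free block followed by a zero-size
 * sentinel that is permanently 'active', so NEXT() always lands on a valid
 * header and allocation scans know where the region ends:
 *
 *     [hdr: active=0, size=N-2*hdr][ ... free space ... ][hdr: active=1, size=0]
 *
 * where N is the region size and hdr is sizeof(struct xlat_cache_block).
 */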
/**
 * Mark every block reachable from the given LUT page as inactive, following
 * each entry's chain, then clear the page.
 */
static void xlat_flush_page_by_lut( void **page )
{
    int i;
    for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
        if( IS_ENTRY_POINT(page[i]) ) {
            void *p = page[i];
            do {
                xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
                block->active = 0;
                p = block->chain;
            } while( p != NULL );
        }
        page[i] = NULL;
    }
}
void FASTCALL xlat_invalidate_word( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( page[entry] != NULL ) {
            xlat_flush_page_by_lut(page);
        }
    }
}

void FASTCALL xlat_invalidate_long( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        /* Check the two word entries covered by the long in a single 64-bit
         * read (note this only inspects both entries on a 32-bit host) */
        if( *(uint64_t *)&page[entry] != 0 ) {
            xlat_flush_page_by_lut(page);
        }
    }
}
void FASTCALL xlat_invalidate_block( sh4addr_t address, size_t size )
{
    int i;
    int entry_count = size >> 1; // words
    uint32_t page_no = XLAT_LUT_PAGE(address);
    int entry = XLAT_LUT_ENTRY(address);
    do {
        void **page = xlat_lut[page_no];
        int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
        if( entry_count < page_entries ) {
            page_entries = entry_count;
        }
        if( page != NULL ) {
            if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
                /* Overwriting the entire page anyway */
                xlat_flush_page_by_lut(page);
            } else {
                for( i=entry; i<entry+page_entries; i++ ) {
                    if( page[i] != NULL ) {
                        xlat_flush_page_by_lut(page);
                        break;
                    }
                }
            }
        }
        page_no ++;
        entry_count -= page_entries;
        entry = 0;
    } while( entry_count > 0 );
}
void FASTCALL xlat_flush_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        xlat_flush_page_by_lut(page);
    }
}

void * FASTCALL xlat_get_code( sh4addr_t address )
{
    void *result = NULL;
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        /* Mask off the low tag bits so a bare XLAT_LUT_ENTRY_USED marker
         * comes back as NULL rather than a bogus code pointer */
        result = (void *)(((uintptr_t)(page[XLAT_LUT_ENTRY(address)])) & (~((uintptr_t)0x03)));
    }
    return result;
}
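/* Editor's note -- hypothetical caller sketch (not from this file); the
 * translator entry point named here is an assumption about the wider codebase:
 *
 *     void *code = xlat_get_code( sh4r.pc );
 *     if( code == NULL ) {
 *         code = sh4_translate_basic_block( sh4r.pc );  // assumed entry point
 *     }
 *     // execute 'code' via the architecture-specific dispatch
 */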
xlat_recovery_record_t xlat_get_pre_recovery( void *code, void *native_pc )
{
    if( code != NULL ) {
        uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
        xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code);
        uint32_t count = block->recover_table_size;
        xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
        uint32_t posn;
        for( posn = 1; posn < count; posn++ ) {
            if( records[posn].xlat_offset >= pc_offset ) {
                return &records[posn-1];
            }
        }
        return &records[count-1];
    }
    return NULL;
}

void ** FASTCALL xlat_get_lut_entry( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];

    /* Add the LUT page for the block if it doesn't exist yet */
    if( page == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] = page =
            (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( page, 0, XLAT_LUT_PAGE_SIZE );
    }

    return &page[XLAT_LUT_ENTRY(address)];
}
uint32_t FASTCALL xlat_get_block_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    return xlt->size;
}

uint32_t FASTCALL xlat_get_code_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    if( xlt->recover_table_offset == 0 ) {
        return xlt->size;
    } else {
        return xlt->recover_table_offset;
    }
}
/**
 * Cut the specified block so that it has the given size, with the remaining data
 * forming a new free block. If the free block would be less than the minimum size,
 * the cut is not performed.
 * @return the next block after the (possibly cut) block.
 */
static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
{
    cutsize = (cutsize + 3) & 0xFFFFFFFC; // round up to 4-byte alignment
    assert( cutsize <= block->size );
    if( block->size > cutsize + MIN_TOTAL_SIZE ) {
        int oldsize = block->size;
        block->size = cutsize;
        xlat_cache_block_t next = NEXT(block);
        next->active = 0;
        next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
        return next;
    } else {
        return NEXT(block);
    }
}
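/* Editor's note -- worked example (not in the original source), assuming a
 * 32-byte block header purely for illustration: cutting a 256-byte block to
 * cutsize=100 gives MIN_TOTAL_SIZE = 32+32 = 64; since 256 > 100+64 the cut
 * happens and the new free block gets size 256-100-32 = 124. Cutting the same
 * block to 200 would leave only 24 bytes after the header (< MIN_BLOCK_SIZE),
 * so the block is left uncut and NEXT(block) is returned unchanged.
 */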
#ifdef XLAT_GENERATIONAL_CACHE
/**
 * Promote a block in temp space (or elsewhere for that matter) to old space.
 *
 * @param block the block to promote.
 */
static void xlat_promote_to_old_space( xlat_cache_block_t block )
{
    int allocation = (int)-sizeof(struct xlat_cache_block);
    int size = block->size;
    xlat_cache_block_t curr = xlat_old_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_old_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_old_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_old_cache_ptr->size == 0 ) {
        xlat_old_cache_ptr = xlat_old_cache;
    }
}
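/* Editor's note -- illustrative accounting (not in the original source): the
 * eviction loop starts 'allocation' at -sizeof(header) and adds size+header
 * for each block consumed, so coalescing k adjacent blocks yields one block
 * whose size is the sum of their sizes plus k-1 recycled headers. E.g.
 * consuming free blocks of 40 and 60 bytes with a 32-byte header gives
 * -32 + (40+32) + (60+32) = 132 usable bytes.
 */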
/**
 * Similar to the above method, promotes a block to temp space.
 * TODO: Try to combine these - they're nearly identical
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    int size = block->size;
    int allocation = (int)-sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = xlat_temp_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        if( curr->active == BLOCK_USED ) {
            xlat_promote_to_old_space( curr );
        } else if( curr->active == BLOCK_ACTIVE ) {
            // Active but not used - release the block, keeping only the
            // low tag bits in its LUT entry
            *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03);
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_temp_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_temp_cache_ptr->size == 0 ) {
        xlat_temp_cache_ptr = xlat_temp_cache;
    }
}
#else
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    *block->lut_entry = 0;
}
#endif
/**
 * Returns the next block in the new cache list that can be written to by the
 * translator. If the next block is active, it is evicted first.
 */
xlat_cache_block_t xlat_start_block( sh4addr_t address )
{
    if( xlat_new_cache_ptr->size == 0 ) {
        xlat_new_cache_ptr = xlat_new_cache;
    }

    if( xlat_new_cache_ptr->active ) {
        xlat_promote_to_temp_space( xlat_new_cache_ptr );
    }
    xlat_new_create_ptr = xlat_new_cache_ptr;
    xlat_new_create_ptr->active = 1;
    xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);

    /* Add the LUT page for the block if it doesn't exist yet */
    if( xlat_lut[XLAT_LUT_PAGE(address)] == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] =
            (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( xlat_lut[XLAT_LUT_PAGE(address)], 0, XLAT_LUT_PAGE_SIZE );
    }

    if( IS_ENTRY_POINT(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]) ) {
        void *p = xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)];
        xlat_cache_block_t oldblock = XLAT_BLOCK_FOR_CODE(p);
        assert( oldblock->active );
        xlat_new_create_ptr->chain = p;
    } else {
        xlat_new_create_ptr->chain = NULL;
    }

    xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)] =
        &xlat_new_create_ptr->code;
    xlat_new_create_ptr->lut_entry = xlat_lut[XLAT_LUT_PAGE(address)] + XLAT_LUT_ENTRY(address);

    return xlat_new_create_ptr;
}
xlat_cache_block_t xlat_extend_block( uint32_t newSize )
{
    while( xlat_new_create_ptr->size < newSize ) {
        if( xlat_new_cache_ptr->size == 0 ) {
            /* Migrate to the front of the cache to keep it contiguous */
            xlat_new_create_ptr->active = 0;
            sh4ptr_t olddata = xlat_new_create_ptr->code;
            int oldsize = xlat_new_create_ptr->size;
            int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
            void **lut_entry = xlat_new_create_ptr->lut_entry;
            void *chain = xlat_new_create_ptr->chain;
            int allocation = (int)-sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = xlat_new_cache;
            do {
                if( xlat_new_cache_ptr->active ) {
                    xlat_promote_to_temp_space( xlat_new_cache_ptr );
                }
                allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
                xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
            } while( allocation < size );
            xlat_new_create_ptr = xlat_new_cache;
            xlat_new_create_ptr->active = 1;
            xlat_new_create_ptr->size = allocation;
            xlat_new_create_ptr->lut_entry = lut_entry;
            xlat_new_create_ptr->chain = chain;
            *lut_entry = &xlat_new_create_ptr->code;
            memmove( xlat_new_create_ptr->code, olddata, oldsize );
        } else {
            if( xlat_new_cache_ptr->active ) {
                xlat_promote_to_temp_space( xlat_new_cache_ptr );
            }
            xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
        }
    }
    return xlat_new_create_ptr;
}
/**
 * Commit the block currently under construction: mark every LUT entry covered
 * by the source as in-use, then trim the block to its final size.
 */
void xlat_commit_block( uint32_t destsize, uint32_t srcsize )
{
    void **ptr = xlat_new_create_ptr->lut_entry;
    void **endptr = ptr + (srcsize>>1);
    while( ptr < endptr ) {
        if( *ptr == NULL ) {
            *ptr = XLAT_LUT_ENTRY_USED;
        }
        ptr++;
    }

    xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
}
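/* Editor's note -- hedged sketch of the expected translator lifecycle, pieced
 * together from this file (emit_one_instruction and MAX_INSN_SIZE are
 * hypothetical names, not lxdream API):
 *
 *     xlat_cache_block_t b = xlat_start_block( pc );  // claim space + LUT entry
 *     uint32_t offset = 0, sh4_bytes = 0;
 *     while( !end_of_basic_block ) {
 *         if( offset + MAX_INSN_SIZE > b->size )
 *             b = xlat_extend_block( offset + MAX_INSN_SIZE ); // grow first
 *         offset += emit_one_instruction( &b->code[offset] );
 *         sh4_bytes += 2;                             // one SH4 word consumed
 *     }
 *     xlat_commit_block( offset, sh4_bytes );         // trim + mark words USED
 */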
void xlat_delete_block( xlat_cache_block_t block )
{
    block->active = 0;
    *block->lut_entry = NULL;
}
void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
{
    int foundptr = 0;
    xlat_cache_block_t tail =
        (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));

    assert( tail->active == 1 );
    assert( tail->size == 0 );
    while( cache < tail ) {
        assert( cache->active >= 0 && cache->active <= 2 );
        assert( cache->size >= 0 && cache->size < size );
        if( cache == ptr ) {
            foundptr = 1;
        }
        cache = NEXT(cache);
    }
    assert( cache == tail );
    assert( foundptr == 1 || tail == ptr );
}
/**
 * Perform a reverse lookup to determine the SH4 address corresponding to
 * the start of the code block containing ptr. This is _slow_ - it does a
 * linear scan of the lookup table to find this.
 *
 * If the pointer cannot be found in any live block, returns -1 (as this
 * is not a legal PC).
 */
sh4addr_t xlat_get_address( unsigned char *ptr )
{
    int i,j;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( j=0; j<XLAT_LUT_PAGE_ENTRIES; j++ ) {
                void *entry = page[j];
                if( ((uintptr_t)entry) > (uintptr_t)XLAT_LUT_ENTRY_USED ) {
                    xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(entry);
                    if( ptr >= block->code && ptr < block->code + block->size) {
                        /* Found it */
                        return (i<<13) | (j<<1);
                    }
                }
            }
        }
    }
    return -1;
}
/**
 * Sanity check that the given pointer is at least contained in one of the
 * cache regions, and has a sane-ish size. We don't do a full region walk atm.
 */
gboolean xlat_is_code_pointer( void *p )
{
    char *region;
    uintptr_t region_size;

    xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
    if( (((char *)block) - (char *)xlat_new_cache) < XLAT_NEW_CACHE_SIZE ) {
        /* Pointer is in new cache */
        region = (char *)xlat_new_cache;
        region_size = XLAT_NEW_CACHE_SIZE;
    }
#ifdef XLAT_GENERATIONAL_CACHE
    else if( (((char *)block) - (char *)xlat_temp_cache) < XLAT_TEMP_CACHE_SIZE ) {
        /* Pointer is in temp cache */
        region = (char *)xlat_temp_cache;
        region_size = XLAT_TEMP_CACHE_SIZE;
    } else if( (((char *)block) - (char *)xlat_old_cache) < XLAT_OLD_CACHE_SIZE ) {
        /* Pointer is in old cache */
        region = (char *)xlat_old_cache;
        region_size = XLAT_OLD_CACHE_SIZE;
    }
#endif
    else {
        /* Not a valid cache pointer */
        return FALSE;
    }

    /* Make sure the whole block is in the region */
    if( (((char *)p) - region) >= region_size ||
        (((char *)(NEXT(block))) - region) >= region_size )
        return FALSE;
    return TRUE;
}
void xlat_check_integrity( void )
{
    xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
#endif
}
typedef struct {
    xlat_cache_block_t block;
    sh4addr_t sh4_pc;
} block_sh4_entry;

unsigned int xlat_get_active_block_count()
{
    unsigned int count = 0;
    xlat_cache_block_t ptr = xlat_new_cache;
    while( ptr->size != 0 ) {
        if( ptr->active != 0 ) {
            count++;
        }
        ptr = NEXT(ptr);
    }
    return count;
}
unsigned int xlat_get_active_blocks( block_sh4_entry *blocks, unsigned int size )
{
    unsigned int count = 0;
    xlat_cache_block_t ptr = xlat_new_cache;
    while( ptr->size != 0 ) {
        if( ptr->active != 0 ) {
            blocks[count].block = ptr;
            blocks[count].sh4_pc = 0;
            count++;
        }
        if( count >= size )
            break;
        ptr = NEXT(ptr);
    }
    return count;
}
void xlat_get_block_sh4addrs( block_sh4_entry *blocks, unsigned int size )
{
    unsigned i;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( unsigned j=0; j < XLAT_LUT_PAGE_ENTRIES; j++ ) {
                void *code = (void *)(((uintptr_t)(page[j])) & (~((uintptr_t)0x03)));
                if( code != NULL ) {
                    xlat_cache_block_t ptr = XLAT_BLOCK_FOR_CODE(code);
                    sh4addr_t pc = XLAT_ADDR_FROM_ENTRY(i,j);
                    for( unsigned k=0; k<size; k++ ) {
                        if( blocks[k].block == ptr ) {
                            blocks[k].sh4_pc = pc;
                            ptr = ptr->chain;
                            if( ptr == NULL )
                                break;
                            else {
                                ptr = XLAT_BLOCK_FOR_CODE(ptr);
                                k = 0;
                            }
                        }
                    }
                }
            }
        }
    }
}
static int xlat_compare_active_field( const void *a, const void *b )
{
    const block_sh4_entry *ptra = (const block_sh4_entry *)a;
    const block_sh4_entry *ptrb = (const block_sh4_entry *)b;
    return ptrb->block->active - ptra->block->active;
}
void xlat_dump_cache_by_activity( unsigned int topN )
{
    unsigned int count = xlat_get_active_block_count();

    block_sh4_entry blocks[count];
    xlat_get_active_blocks(blocks, count);
    xlat_get_block_sh4addrs(blocks,count);
    qsort(blocks, count, sizeof(block_sh4_entry), xlat_compare_active_field);

    if( topN == 0 || topN > count )
        topN = count;
    for( unsigned int i=0; i<topN; i++ ) {
        fprintf(stderr, "0x%08X (%p): %d\n", blocks[i].sh4_pc, blocks[i].block->code, blocks[i].block->active);
        sh4_translate_disasm_block( stderr, blocks[i].block->code, blocks[i].sh4_pc, NULL );
        fprintf(stderr, "\n");
    }
}
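/* Editor's note -- per the changeset message ("dump them out from
 * most-to-least used"), a debug or shutdown hook would presumably call e.g.
 * xlat_dump_cache_by_activity(20) to disassemble the 20 most-executed blocks;
 * passing 0 dumps every active block.
 */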