filename     src/xlat/xltcache.c
changeset    1189:1540105786c8
prev         1188:1cc9bb0b3848
next         1195:072131b61d2a
author       nkeynes
date         Thu Dec 01 08:02:13 2011 +1000
permissions  -rw-r--r--
last change  Fix make check

/**
 * $Id$
 *
 * Translation cache management. This part is architecture independent.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>   /* memset, memcpy, memmove */
#include <stdlib.h>   /* qsort */
#include <assert.h>

#include "dreamcast.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "xlat/xltcache.h"
#include "x86dasm/x86dasm.h"
#define XLAT_LUT_PAGE_BITS 12
#define XLAT_LUT_TOTAL_BITS 28
#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)

#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))

#define XLAT_LUT_ENTRY_EMPTY (void *)0
#define XLAT_LUT_ENTRY_USED  (void *)1

#define XLAT_ADDR_FROM_ENTRY(pagenum,entrynum) ((((pagenum)&0xFFFF)<<13)|(((entrynum)<<1)&0x1FFE))

#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
#define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
#define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)

#define MIN_BLOCK_SIZE 32
#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)

#define BLOCK_INACTIVE 0
#define BLOCK_ACTIVE 1
#define BLOCK_USED 2

xlat_cache_block_t xlat_new_cache;
xlat_cache_block_t xlat_new_cache_ptr;
xlat_cache_block_t xlat_new_create_ptr;

#ifdef XLAT_GENERATIONAL_CACHE
xlat_cache_block_t xlat_temp_cache;
xlat_cache_block_t xlat_temp_cache_ptr;
xlat_cache_block_t xlat_old_cache;
xlat_cache_block_t xlat_old_cache_ptr;
#endif

static void **xlat_lut[XLAT_LUT_PAGES];
static gboolean xlat_initialized = FALSE;
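
/**
 * One-time initialisation: mmap the code cache region(s) with PROT_EXEC
 * so generated code can be executed in place, then reset everything to
 * the empty state via xlat_flush_cache().
 */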
void xlat_cache_init(void)
{
    if( !xlat_initialized ) {
        xlat_initialized = TRUE;
        xlat_new_cache = (xlat_cache_block_t)mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_new_cache_ptr = xlat_new_cache;
        xlat_new_create_ptr = xlat_new_cache;
#ifdef XLAT_GENERATIONAL_CACHE
        xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_temp_cache_ptr = xlat_temp_cache;
        xlat_old_cache_ptr = xlat_old_cache;
#endif
//        xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
//                MAP_PRIVATE|MAP_ANON, -1, 0);
        memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
    }
    xlat_flush_cache();
}

/**
 * Reset the cache structure to its default state
 */
void xlat_flush_cache()
{
    xlat_cache_block_t tmp;
    int i;
    xlat_new_cache_ptr = xlat_new_cache;
    xlat_new_cache_ptr->active = 0;
    xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_new_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_temp_cache_ptr->active = 0;
    xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_temp_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_old_cache_ptr->active = 0;
    xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_old_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#endif
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        if( xlat_lut[i] != NULL ) {
            memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
        }
    }
}
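
/**
 * Delete a single block: mark it inactive, unchain it from its LUT
 * entry, and unlink any blocks that reference it.
 */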
void xlat_delete_block( xlat_cache_block_t block )
{
    block->active = 0;
    *block->lut_entry = block->chain;
    sh4_translate_unlink_block( block->use_list );
}
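
/**
 * Delete every block chained from an entry point in the given LUT page,
 * then clear all of the page's entries (including USED sentinels).
 */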
static void xlat_flush_page_by_lut( void **page )
{
    int i;
    for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
        if( IS_ENTRY_POINT(page[i]) ) {
            void *p = page[i];
            do {
                xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
                xlat_delete_block(block);
                p = block->chain;
            } while( p != NULL );
        }
        page[i] = NULL;
    }
}
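
/**
 * Invalidate any translated code covering the 16-bit word at addr.
 * Note that this flushes the entire 8KB page containing the word, not
 * just the blocks that overlap it.
 */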
void FASTCALL xlat_invalidate_word( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( page[entry] != NULL ) {
            xlat_flush_page_by_lut(page);
        }
    }
}
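
/**
 * As xlat_invalidate_word, but for a 32-bit write. The single 64-bit
 * load checks both affected LUT entries at once when pointers are
 * 32 bits wide (with 64-bit pointers it covers only the first word).
 */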
void FASTCALL xlat_invalidate_long( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( *(uint64_t *)&page[entry] != 0 ) {
            xlat_flush_page_by_lut(page);
        }
    }
}
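
/**
 * Invalidate all translated code in the given address range, one LUT
 * page at a time; any page containing a live entry in the range is
 * flushed in its entirety.
 */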
void FASTCALL xlat_invalidate_block( sh4addr_t address, size_t size )
{
    int i;
    int entry_count = size >> 1; // words
    uint32_t page_no = XLAT_LUT_PAGE(address);
    int entry = XLAT_LUT_ENTRY(address);
    do {
        void **page = xlat_lut[page_no];
        int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
        if( entry_count < page_entries ) {
            page_entries = entry_count;
        }
        if( page != NULL ) {
            if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
                /* Overwriting the entire page anyway */
                xlat_flush_page_by_lut(page);
            } else {
                for( i=entry; i<entry+page_entries; i++ ) {
                    if( page[i] != NULL ) {
                        xlat_flush_page_by_lut(page);
                        break;
                    }
                }
            }
        }
        page_no ++;
        entry_count -= page_entries;
        entry = 0;
    } while( entry_count > 0 );
}
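
/**
 * Flush all translated code in the 8KB page containing the given address.
 */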
void FASTCALL xlat_flush_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        xlat_flush_page_by_lut(page);
    }
}
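
/**
 * Look up the native code entry point for an SH4 address. The low two
 * bits of the LUT entry are tag bits and are masked off, so a USED
 * sentinel comes back as NULL, just like an empty entry.
 */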
void * FASTCALL xlat_get_code( sh4addr_t address )
{
    void *result = NULL;
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        result = (void *)(((uintptr_t)(page[XLAT_LUT_ENTRY(address)])) & (~((uintptr_t)0x03)));
    }
    return result;
}
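
/**
 * Find the recovery record that applies at native_pc within the given
 * code block: the last record whose xlat_offset lies below the PC
 * offset. Returns NULL if code is NULL.
 */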
xlat_recovery_record_t xlat_get_pre_recovery( void *code, void *native_pc )
{
    if( code != NULL ) {
        uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
        xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code);
        uint32_t count = block->recover_table_size;
        xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
        uint32_t posn;
        for( posn = 1; posn < count; posn++ ) {
            if( records[posn].xlat_offset >= pc_offset ) {
                return &records[posn-1];
            }
        }
        return &records[count-1];
    }
    return NULL;
}
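
/**
 * Return a pointer to the LUT entry for the given address, allocating
 * and zeroing the second-level page on first use.
 */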
void ** FASTCALL xlat_get_lut_entry( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];

    /* Allocate the LUT page if it doesn't exist yet */
    if( page == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] = page =
            (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( page, 0, XLAT_LUT_PAGE_SIZE );
    }

    return &page[XLAT_LUT_ENTRY(address)];
}
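
/**
 * Return the total allocated size of the block containing the given
 * code pointer (excluding the block header).
 */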
uint32_t FASTCALL xlat_get_block_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    return xlt->size;
}
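
/**
 * Return the size of the executable portion of the block: everything up
 * to the recovery table, or the whole block if it has no recovery table.
 */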
uint32_t FASTCALL xlat_get_code_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    if( xlt->recover_table_offset == 0 ) {
        return xlt->size;
    } else {
        return xlt->recover_table_offset;
    }
}

/**
 * Cut the specified block so that it has the given size, with the remaining data
 * forming a new free block. If the free block would be less than the minimum size,
 * the cut is not performed.
 * @return the next block after the (possibly cut) block.
 */
static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
{
    cutsize = (cutsize + 3) & 0xFFFFFFFC; // force 4-byte alignment
    assert( cutsize <= block->size );
    if( block->size > cutsize + MIN_TOTAL_SIZE ) {
        int oldsize = block->size;
        block->size = cutsize;
        xlat_cache_block_t next = NEXT(block);
        next->active = 0;
        next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
        return next;
    } else {
        return NEXT(block);
    }
}

#ifdef XLAT_GENERATIONAL_CACHE
/**
 * Promote a block in temp space (or elsewhere for that matter) to old space.
 *
 * @param block to promote.
 */
static void xlat_promote_to_old_space( xlat_cache_block_t block )
{
    int allocation = (int)-sizeof(struct xlat_cache_block);
    int size = block->size;
    xlat_cache_block_t curr = xlat_old_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_old_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_old_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_old_cache_ptr->size == 0 ) {
        xlat_old_cache_ptr = xlat_old_cache;
    }
}

/**
 * As with the above method, promotes a block to temp space.
 * TODO: Try to combine these - they're nearly identical
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    int size = block->size;
    int allocation = (int)-sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = xlat_temp_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        if( curr->active == BLOCK_USED ) {
            xlat_promote_to_old_space( curr );
        } else if( curr->active == BLOCK_ACTIVE ) {
            // Active but not used, release block
            *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03);
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_temp_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_temp_cache_ptr->size == 0 ) {
        xlat_temp_cache_ptr = xlat_temp_cache;
    }
}
#else
/* Without the generational cache there is nowhere to migrate evicted
 * blocks to, so they are simply deleted. */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    *block->lut_entry = block->chain;
    xlat_delete_block(block);
}
#endif

/**
 * Returns the next block in the new cache list that can be written to by the
 * translator. If the next block is active, it is evicted first.
 */
xlat_cache_block_t xlat_start_block( sh4addr_t address )
{
    if( xlat_new_cache_ptr->size == 0 ) {
        xlat_new_cache_ptr = xlat_new_cache;
    }

    if( xlat_new_cache_ptr->active ) {
        xlat_promote_to_temp_space( xlat_new_cache_ptr );
    }
    xlat_new_create_ptr = xlat_new_cache_ptr;
    xlat_new_create_ptr->active = 1;
    xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);

    /* Add the LUT entry for the block */
    if( xlat_lut[XLAT_LUT_PAGE(address)] == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] =
            (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( xlat_lut[XLAT_LUT_PAGE(address)], 0, XLAT_LUT_PAGE_SIZE );
    }

    if( IS_ENTRY_POINT(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]) ) {
        void *p = xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)];
        xlat_cache_block_t oldblock = XLAT_BLOCK_FOR_CODE(p);
        assert( oldblock->active );
        xlat_new_create_ptr->chain = p;
    } else {
        xlat_new_create_ptr->chain = NULL;
    }
    xlat_new_create_ptr->use_list = NULL;

    xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)] =
        &xlat_new_create_ptr->code;
    xlat_new_create_ptr->lut_entry = xlat_lut[XLAT_LUT_PAGE(address)] + XLAT_LUT_ENTRY(address);

    return xlat_new_create_ptr;
}
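
/**
 * Grow the block currently under construction to at least newSize bytes
 * by merging in the blocks that follow it (evicting any active ones to
 * temp space). When the end-of-cache sentinel is hit, the block is moved
 * back to the front of the cache so that its code stays contiguous.
 */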
xlat_cache_block_t xlat_extend_block( uint32_t newSize )
{
    assert( xlat_new_create_ptr->use_list == NULL );
    while( xlat_new_create_ptr->size < newSize ) {
        if( xlat_new_cache_ptr->size == 0 ) {
            /* Migrate to the front of the cache to keep it contiguous */
            xlat_new_create_ptr->active = 0;
            sh4ptr_t olddata = xlat_new_create_ptr->code;
            int oldsize = xlat_new_create_ptr->size;
            int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
            void **lut_entry = xlat_new_create_ptr->lut_entry;
            void *chain = xlat_new_create_ptr->chain;
            int allocation = (int)-sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = xlat_new_cache;
            do {
                if( xlat_new_cache_ptr->active ) {
                    xlat_promote_to_temp_space( xlat_new_cache_ptr );
                }
                allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
                xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
            } while( allocation < size );
            xlat_new_create_ptr = xlat_new_cache;
            xlat_new_create_ptr->active = 1;
            xlat_new_create_ptr->size = allocation;
            xlat_new_create_ptr->lut_entry = lut_entry;
            xlat_new_create_ptr->chain = chain;
            xlat_new_create_ptr->use_list = NULL;
            *lut_entry = &xlat_new_create_ptr->code;
            memmove( xlat_new_create_ptr->code, olddata, oldsize );
        } else {
            if( xlat_new_cache_ptr->active ) {
                xlat_promote_to_temp_space( xlat_new_cache_ptr );
            }
            xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
        }
    }
    return xlat_new_create_ptr;
}
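
/**
 * Finalise the block under construction: mark every LUT entry covered
 * by the source (SH4) range as used, then trim the block to its final
 * native size, returning the remainder to the free pool.
 */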
void xlat_commit_block( uint32_t destsize, uint32_t srcsize )
{
    void **ptr = xlat_new_create_ptr->lut_entry;
    void **endptr = ptr + (srcsize>>1);
    while( ptr < endptr ) {
        if( *ptr == NULL ) {
            *ptr = XLAT_LUT_ENTRY_USED;
        }
        ptr++;
    }

    xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
}
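
/**
 * Walk a cache region and assert that its block chain is well-formed:
 * each header is sane, the walk ends exactly at the zero-size sentinel,
 * and the region's allocation pointer refers to a real block.
 */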
void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
{
    int foundptr = 0;
    xlat_cache_block_t tail =
        (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));

    assert( tail->active == 1 );
    assert( tail->size == 0 );
    while( cache < tail ) {
        assert( cache->active >= 0 && cache->active <= 2 );
        assert( cache->size >= 0 && cache->size < size );
        if( cache == ptr ) {
            foundptr = 1;
        }
        cache = NEXT(cache);
    }
    assert( cache == tail );
    assert( foundptr == 1 || tail == ptr );
}

/**
 * Perform a reverse lookup to determine the SH4 address corresponding to
 * the start of the code block containing ptr. This is _slow_ - it does a
 * linear scan of the lookup table to find this.
 *
 * If the pointer cannot be found in any live block, returns -1 (as this
 * is not a legal PC)
 */
sh4addr_t xlat_get_address( unsigned char *ptr )
{
    int i,j;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( j=0; j<XLAT_LUT_PAGE_ENTRIES; j++ ) {
                void *entry = page[j];
                if( ((uintptr_t)entry) > (uintptr_t)XLAT_LUT_ENTRY_USED ) {
                    xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(entry);
                    if( ptr >= block->code && ptr < block->code + block->size) {
                        /* Found it */
                        return (i<<13) | (j<<1);
                    }
                }
            }
        }
    }
    return -1;
}

/**
 * Sanity check that the given pointer is at least contained in one of the
 * cache regions, and has a sane-ish size. We don't do a full region walk
 * at the moment.
 */
gboolean xlat_is_code_pointer( void *p )
{
    char *region;
    uintptr_t region_size;

    xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
    if( (((char *)block) - (char *)xlat_new_cache) < XLAT_NEW_CACHE_SIZE ) {
        /* Pointer is in new cache */
        region = (char *)xlat_new_cache;
        region_size = XLAT_NEW_CACHE_SIZE;
    }
#ifdef XLAT_GENERATIONAL_CACHE
    else if( (((char *)block) - (char *)xlat_temp_cache) < XLAT_TEMP_CACHE_SIZE ) {
        /* Pointer is in temp cache */
        region = (char *)xlat_temp_cache;
        region_size = XLAT_TEMP_CACHE_SIZE;
    } else if( (((char *)block) - (char *)xlat_old_cache) < XLAT_OLD_CACHE_SIZE ) {
        /* Pointer is in old cache */
        region = (char *)xlat_old_cache;
        region_size = XLAT_OLD_CACHE_SIZE;
    }
#endif
    else {
        /* Not a valid cache pointer */
        return FALSE;
    }

    /* Make sure the whole block is in the region */
    if( (((char *)p) - region) >= region_size ||
        (((char *)(NEXT(block))) - region) >= region_size )
        return FALSE;
    return TRUE;
}

void xlat_check_integrity( )
{
    xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
#endif
}
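
/**
 * Count the active blocks in the new-code cache.
 */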
unsigned int xlat_get_active_block_count()
{
    unsigned int count = 0;
    xlat_cache_block_t ptr = xlat_new_cache;
    while( ptr->size != 0 ) {
        if( ptr->active != 0 ) {
            count++;
        }
        ptr = NEXT(ptr);
    }
    return count;
}
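
/**
 * Collect up to size references to active blocks in the new-code cache
 * into blocks, returning the number collected. PCs are left as 0, to be
 * filled in by xlat_get_block_pcs().
 */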
unsigned int xlat_get_active_blocks( struct xlat_block_ref *blocks, unsigned int size )
{
    unsigned int count = 0;
    xlat_cache_block_t ptr = xlat_new_cache;
    while( ptr->size != 0 ) {
        if( ptr->active != 0 ) {
            blocks[count].block = ptr;
            blocks[count].pc = 0;
            count++;
        }
        if( count >= size )
            break;
        ptr = NEXT(ptr);
    }
    return count;
}
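
/**
 * Reverse-map each collected block to an SH4 PC by scanning the LUT,
 * following block chains so that chained blocks pick up the PC of their
 * shared entry point.
 */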
static void xlat_get_block_pcs( struct xlat_block_ref *blocks, unsigned int size )
{
    unsigned i;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( unsigned j=0; j < XLAT_LUT_PAGE_ENTRIES; j++ ) {
                void *code = (void *)(((uintptr_t)(page[j])) & (~((uintptr_t)0x03)));
                if( code != NULL ) {
                    xlat_cache_block_t ptr = XLAT_BLOCK_FOR_CODE(code);
                    sh4addr_t pc = XLAT_ADDR_FROM_ENTRY(i,j);
                    for( unsigned k=0; k<size; k++ ) {
                        if( blocks[k].block == ptr ) {
                            blocks[k].pc = pc;
                            ptr = ptr->chain;
                            if( ptr == NULL )
                                break;
                            else {
                                ptr = XLAT_BLOCK_FOR_CODE(ptr);
                                k = 0;
                            }
                        }
                    }
                }
            }
        }
    }
}
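
/**
 * qsort comparator: orders block references by descending activity count.
 */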
static int xlat_compare_active_field( const void *a, const void *b )
{
    const struct xlat_block_ref *ptra = (const struct xlat_block_ref *)a;
    const struct xlat_block_ref *ptrb = (const struct xlat_block_ref *)b;
    return ptrb->block->active - ptra->block->active;
}
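
/**
 * Copy references to the topN most active blocks (by activity count)
 * into outblocks, and return the number copied.
 */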
unsigned int xlat_get_cache_blocks_by_activity( xlat_block_ref_t outblocks, size_t topN )
{
    int count = xlat_get_active_block_count();

    struct xlat_block_ref blocks[count];
    xlat_get_active_blocks(blocks, count);
    xlat_get_block_pcs(blocks,count);
    qsort(blocks, count, sizeof(struct xlat_block_ref), xlat_compare_active_field);

    if( topN > count )
        topN = count;
    memcpy(outblocks, blocks, topN*sizeof(struct xlat_block_ref));
    return topN;
}