lxdream.org :: lxdream/src/xlat/xltcache.c
filename src/xlat/xltcache.c
changeset 1263:b3de98d19faf
prev 1214:49152b3d8b75
next 1298:d0eb2307b847
author nkeynes
date Fri Aug 24 08:53:50 2012 +1000
permissions -rw-r--r--
last change Move the generated prologue/epilogue code out into a common entry stub
(reduces space requirements) and pre-save all saved registers. Change
FASTCALL to use 3 regs instead of 2 since we can now keep everything in
regs.
/**
 * $Id$
 *
 * Translation cache management. This part is architecture independent.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <assert.h>

#include "dreamcast.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "xlat/xltcache.h"

#define XLAT_LUT_PAGE_BITS 12
#define XLAT_LUT_TOTAL_BITS 28
#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)

#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))

#define XLAT_LUT_ENTRY_EMPTY (void *)0
#define XLAT_LUT_ENTRY_USED  (void *)1

#define XLAT_ADDR_FROM_ENTRY(pagenum,entrynum) ((((pagenum)&0xFFFF)<<13)|(((entrynum)<<1)&0x1FFE))

#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
#define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
#define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)
#define IS_ENTRY_CONTINUATION(ent) (((uintptr_t)ent) & ((uintptr_t)XLAT_LUT_ENTRY_USED))
#define IS_FIRST_ENTRY_IN_PAGE(addr) (((addr)&0x1FFE) == 0)
#define XLAT_CODE_ADDR(ent) ((void *)(((uintptr_t)ent) & (~((uintptr_t)0x03))))
#define XLAT_BLOCK_FOR_LUT_ENTRY(ent) XLAT_BLOCK_FOR_CODE(XLAT_CODE_ADDR(ent))
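/* The lookup table is a two-level structure: xlat_lut[] holds 2^16 lazily
 * allocated pages, each covering 8KB of SH4 address space with one entry per
 * 16-bit instruction word (2^12 entries per page). Only bits 1..28 of an
 * address are used. An entry of 0 means "no translation", 1 means the word is
 * covered by the continuation of a translated block, and any larger value is
 * a pointer to the block's entry point with tag bits in the low two bits
 * (bit 0 marks a continuation); XLAT_CODE_ADDR masks the tag bits off again.
 * For example, address 0x0C001002 decodes to page (0x0C001002>>13)&0xFFFF =
 * 0x6000 and entry (0x1002&0x1FFE)>>1 = 0x801, and
 * XLAT_ADDR_FROM_ENTRY(0x6000,0x801) reconstructs 0x0C001002.
 */
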
#define MIN_BLOCK_SIZE 32
#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)

#define BLOCK_INACTIVE 0
#define BLOCK_ACTIVE 1
#define BLOCK_USED 2

xlat_cache_block_t xlat_new_cache;
xlat_cache_block_t xlat_new_cache_ptr;
xlat_cache_block_t xlat_new_create_ptr;

#ifdef XLAT_GENERATIONAL_CACHE
xlat_cache_block_t xlat_temp_cache;
xlat_cache_block_t xlat_temp_cache_ptr;
xlat_cache_block_t xlat_old_cache;
xlat_cache_block_t xlat_old_cache_ptr;
#endif
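
/* Freshly translated blocks are written into the "new" cache. When the
 * generational cache is compiled in (XLAT_GENERATIONAL_CACHE), blocks evicted
 * from the new cache are promoted into the temp cache, and blocks in the temp
 * cache that are still marked BLOCK_USED are promoted again into the old
 * cache; without it, eviction from the new cache simply deletes the block.
 */
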
static void **xlat_lut[XLAT_LUT_PAGES];
static gboolean xlat_initialized = FALSE;
static xlat_target_fns_t xlat_target = NULL;
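
/**
 * Allocate the translation cache region(s) (mmap'd read/write/execute) the
 * first time this is called, then reset all cache and LUT state. Subsequent
 * calls simply flush.
 */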
void xlat_cache_init(void)
{
    if( !xlat_initialized ) {
        xlat_initialized = TRUE;
        xlat_new_cache = (xlat_cache_block_t)mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_new_cache_ptr = xlat_new_cache;
        xlat_new_create_ptr = xlat_new_cache;
#ifdef XLAT_GENERATIONAL_CACHE
        xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_temp_cache_ptr = xlat_temp_cache;
        xlat_old_cache_ptr = xlat_old_cache;
#endif
//        xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
//                MAP_PRIVATE|MAP_ANON, -1, 0);
        memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
    }
    xlat_flush_cache();
}

void xlat_set_target_fns( xlat_target_fns_t target )
{
    xlat_target = target;
}

/**
 * Reset the cache structure to its default state
 */
void xlat_flush_cache()
{
    xlat_cache_block_t tmp;
    int i;
    xlat_new_cache_ptr = xlat_new_cache;
    xlat_new_cache_ptr->active = 0;
    xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_new_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_temp_cache_ptr->active = 0;
    xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_temp_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_old_cache_ptr->active = 0;
    xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_old_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#endif
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        if( xlat_lut[i] != NULL ) {
            memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
        }
    }
}
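
/**
 * Take a single block out of service: mark it inactive, point its LUT entry
 * at the next block in its chain, and ask the target back-end to unlink any
 * blocks that branch directly into it.
 */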
void xlat_delete_block( xlat_cache_block_t block )
{
    block->active = 0;
    *block->lut_entry = block->chain;
    if( block->use_list != NULL )
        xlat_target->unlink_block(block->use_list);
}
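
/**
 * Invalidate every translation reachable from a single LUT page: follow each
 * entry-point chain, deleting every block on it, then clear all entries in
 * the page.
 */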
static void xlat_flush_page_by_lut( void **page )
{
    int i;
    for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
        if( IS_ENTRY_POINT(page[i]) ) {
            void *p = XLAT_CODE_ADDR(page[i]);
            do {
                xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
                xlat_delete_block(block);
                p = block->chain;
            } while( p != NULL );
        }
        page[i] = NULL;
    }
}
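
/* Memory-write invalidation entry points. Invalidation is page-granular: if
 * the written word, longword or range touches a page holding any live
 * translation, all of that page's translations are flushed. The first entry
 * of a page may be the delay slot of a block that began on the previous page,
 * in which case the previous page is flushed as well.
 */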
void FASTCALL xlat_invalidate_word( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( entry == 0 && IS_ENTRY_CONTINUATION(page[entry]) ) {
            /* First entry may be a delay-slot for the previous page */
            xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(addr-2)]);
        }
        if( page[entry] != NULL ) {
            xlat_flush_page_by_lut(page);
        }
    }
}

void FASTCALL xlat_invalidate_long( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( entry == 0 && IS_ENTRY_CONTINUATION(page[entry]) ) {
            /* First entry may be a delay-slot for the previous page */
            xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(addr-2)]);
        }
        if( *(uint64_t *)&page[entry] != 0 ) {
            xlat_flush_page_by_lut(page);
        }
    }
}

void FASTCALL xlat_invalidate_block( sh4addr_t address, size_t size )
{
    int i;
    int entry_count = size >> 1; // words;
    uint32_t page_no = XLAT_LUT_PAGE(address);
    int entry = XLAT_LUT_ENTRY(address);

    if( entry == 0 && xlat_lut[page_no] != NULL && IS_ENTRY_CONTINUATION(xlat_lut[page_no][entry])) {
        /* First entry may be a delay-slot for the previous page */
        xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(address-2)]);
    }
    do {
        void **page = xlat_lut[page_no];
        int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
        if( entry_count < page_entries ) {
            page_entries = entry_count;
        }
        if( page != NULL ) {
            if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
                /* Overwriting the entire page anyway */
                xlat_flush_page_by_lut(page);
            } else {
                for( i=entry; i<entry+page_entries; i++ ) {
                    if( page[i] != NULL ) {
                        xlat_flush_page_by_lut(page);
                        break;
                    }
                }
            }
            entry_count -= page_entries;
        }
        page_no ++;
        entry_count -= page_entries;
        entry = 0;
    } while( entry_count > 0 );
}

void FASTCALL xlat_flush_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        xlat_flush_page_by_lut(page);
    }
}
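
/**
 * Return the translated entry point for an SH4 address, or NULL if none
 * exists (the tag bits are masked off, so an empty or continuation-only
 * entry yields NULL).
 */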
void * FASTCALL xlat_get_code( sh4addr_t address )
{
    void *result = NULL;
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        result = XLAT_CODE_ADDR(page[XLAT_LUT_ENTRY(address)]);
    }
    return result;
}
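
/**
 * Locate the recovery record that applies to native_pc within the given code
 * block, by scanning the block's recovery table for the last record starting
 * before that point. Returns NULL if code is NULL.
 */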
xlat_recovery_record_t xlat_get_pre_recovery( void *code, void *native_pc )
{
    if( code != NULL ) {
        uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
        xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code);
        uint32_t count = block->recover_table_size;
        xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
        uint32_t posn;
        for( posn = 1; posn < count; posn++ ) {
            if( records[posn].xlat_offset >= pc_offset ) {
                return &records[posn-1];
            }
        }
        return &records[count-1];
    }
    return NULL;
}
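
/**
 * Return the LUT page covering the given address, allocating and zeroing it
 * on demand (pages are mmap'd lazily rather than reserved up front).
 */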
static void **xlat_get_lut_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];

    /* Add the LUT entry for the block */
    if( page == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] = page =
            (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( page, 0, XLAT_LUT_PAGE_SIZE );
    }

    return page;
}

void ** FASTCALL xlat_get_lut_entry( sh4addr_t address )
{
    void **page = xlat_get_lut_page(address);
    return &page[XLAT_LUT_ENTRY(address)];
}
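
/* Accessors for the block header that immediately precedes a piece of
 * translated code: the total allocated size, and the size of the code proper
 * (everything before the recovery table, when the block has one).
 */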
uint32_t FASTCALL xlat_get_block_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    return xlt->size;
}

uint32_t FASTCALL xlat_get_code_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    if( xlt->recover_table_offset == 0 ) {
        return xlt->size;
    } else {
        return xlt->recover_table_offset;
    }
}

/**
 * Cut the specified block so that it has the given size, with the remaining data
 * forming a new free block. If the free block would be less than the minimum size,
 * the cut is not performed.
 * @return the next block after the (possibly cut) block.
 */
static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
{
    cutsize = (cutsize + 3) & 0xFFFFFFFC; // force word alignment
    assert( cutsize <= block->size );
    if( block->size > cutsize + MIN_TOTAL_SIZE ) {
        int oldsize = block->size;
        block->size = cutsize;
        xlat_cache_block_t next = NEXT(block);
        next->active = 0;
        next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
        return next;
    } else {
        return NEXT(block);
    }
}
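
/* For illustration (assuming the block header is small relative to these
 * sizes): cutting a 512-byte block down to 100 bytes yields a 100-byte block
 * followed by a new free block of 512 - 100 - sizeof(struct xlat_cache_block)
 * bytes, while cutting it to 500 bytes leaves it untouched, because the
 * leftover would fall below MIN_TOTAL_SIZE.
 */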

#ifdef XLAT_GENERATIONAL_CACHE
/**
 * Promote a block in temp space (or elsewhere for that matter) to old space.
 *
 * @param block to promote.
 */
static void xlat_promote_to_old_space( xlat_cache_block_t block )
{
    int allocation = (int)-sizeof(struct xlat_cache_block);
    int size = block->size;
    xlat_cache_block_t curr = xlat_old_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_old_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_old_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_old_cache_ptr->size == 0 ) {
        xlat_old_cache_ptr = xlat_old_cache;
    }
}

/**
 * Similarly to the above method, promotes a block to temp space.
 * TODO: Try to combine these - they're nearly identical
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    int size = block->size;
    int allocation = (int)-sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = xlat_temp_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        if( curr->active == BLOCK_USED ) {
            xlat_promote_to_old_space( curr );
        } else if( curr->active == BLOCK_ACTIVE ) {
            // Active but not used, release block
            *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03);
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_temp_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_temp_cache_ptr->size == 0 ) {
        xlat_temp_cache_ptr = xlat_temp_cache;
    }
}
#else
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    *block->lut_entry = block->chain;
    xlat_delete_block(block);
}
#endif

/**
 * Returns the next block in the new cache list that can be written to by the
 * translator. If the next block is active, it is evicted first.
 */
xlat_cache_block_t xlat_start_block( sh4addr_t address )
{
    if( xlat_new_cache_ptr->size == 0 ) {
        xlat_new_cache_ptr = xlat_new_cache;
    }

    if( xlat_new_cache_ptr->active ) {
        xlat_promote_to_temp_space( xlat_new_cache_ptr );
    }
    xlat_new_create_ptr = xlat_new_cache_ptr;
    xlat_new_create_ptr->active = 1;
    xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);

    /* Add the LUT entry for the block */
    void **p = xlat_get_lut_entry(address);
    void *entry = *p;
    if( IS_ENTRY_POINT(entry) ) {
        xlat_cache_block_t oldblock = XLAT_BLOCK_FOR_LUT_ENTRY(entry);
        assert( oldblock->active );
        xlat_new_create_ptr->chain = XLAT_CODE_ADDR(entry);
    } else {
        xlat_new_create_ptr->chain = NULL;
    }
    xlat_new_create_ptr->use_list = NULL;

    *p = &xlat_new_create_ptr->code;
    if( IS_ENTRY_CONTINUATION(entry) ) {
        *((uintptr_t *)p) |= (uintptr_t)XLAT_LUT_ENTRY_USED;
    }
    xlat_new_create_ptr->lut_entry = p;

    return xlat_new_create_ptr;
}
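
/**
 * Grow the block currently being written until it is at least newSize bytes,
 * coalescing the free (or freshly evicted) blocks that follow it. If the end
 * of the cache is reached, the partially written block is moved back to the
 * front of the cache so that its code remains contiguous.
 */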
xlat_cache_block_t xlat_extend_block( uint32_t newSize )
{
    assert( xlat_new_create_ptr->use_list == NULL );
    while( xlat_new_create_ptr->size < newSize ) {
        if( xlat_new_cache_ptr->size == 0 ) {
            /* Migrate to the front of the cache to keep it contiguous */
            xlat_new_create_ptr->active = 0;
            sh4ptr_t olddata = xlat_new_create_ptr->code;
            int oldsize = xlat_new_create_ptr->size;
            int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
            void **lut_entry = xlat_new_create_ptr->lut_entry;
            void *chain = xlat_new_create_ptr->chain;
            int allocation = (int)-sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = xlat_new_cache;
            do {
                if( xlat_new_cache_ptr->active ) {
                    xlat_promote_to_temp_space( xlat_new_cache_ptr );
                }
                allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
                xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
            } while( allocation < size );
            xlat_new_create_ptr = xlat_new_cache;
            xlat_new_create_ptr->active = 1;
            xlat_new_create_ptr->size = allocation;
            xlat_new_create_ptr->lut_entry = lut_entry;
            xlat_new_create_ptr->chain = chain;
            xlat_new_create_ptr->use_list = NULL;
            *lut_entry = &xlat_new_create_ptr->code;
            memmove( xlat_new_create_ptr->code, olddata, oldsize );
        } else {
            if( xlat_new_cache_ptr->active ) {
                xlat_promote_to_temp_space( xlat_new_cache_ptr );
            }
            xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
        }
    }
    return xlat_new_create_ptr;
}
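
/**
 * Finish the block under construction: flag each instruction word from
 * startpc+2 up to endpc as a continuation of this block in the LUT, so that a
 * write anywhere in the range will invalidate it, then trim the block to its
 * final size, leaving the remainder free for the next translation.
 */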
void xlat_commit_block( uint32_t destsize, sh4addr_t startpc, sh4addr_t endpc )
{
    void **entry = xlat_get_lut_entry(startpc+2);
    /* assume main entry has already been set at this point */

    for( sh4addr_t pc = startpc+2; pc < endpc; pc += 2 ) {
        if( XLAT_LUT_ENTRY(pc) == 0 )
            entry = xlat_get_lut_entry(pc);
        *((uintptr_t *)entry) |= (uintptr_t)XLAT_LUT_ENTRY_USED;
        entry++;
    }

    xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
}
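
/**
 * Debug walk over one cache region: every block header must be reachable by
 * stepping NEXT() from the start of the region, block states and sizes must
 * be sane, the walk must end exactly at the sentinel, and the allocation
 * pointer must lie on a block boundary.
 */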
void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
{
    int foundptr = 0;
    xlat_cache_block_t tail =
        (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));

    assert( tail->active == 1 );
    assert( tail->size == 0 );
    while( cache < tail ) {
        assert( cache->active >= 0 && cache->active <= 2 );
        assert( cache->size >= 0 && cache->size < size );
        if( cache == ptr ) {
            foundptr = 1;
        }
        cache = NEXT(cache);
    }
    assert( cache == tail );
    assert( foundptr == 1 || tail == ptr );
}

/**
 * Perform a reverse lookup to determine the SH4 address corresponding to
 * the start of the code block containing ptr. This is _slow_ - it does a
 * linear scan of the lookup table to find this.
 *
 * If the pointer cannot be found in any live block, returns -1 (as this
 * is not a legal PC)
 */
sh4addr_t xlat_get_address( unsigned char *ptr )
{
    int i,j;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( j=0; j<XLAT_LUT_PAGE_ENTRIES; j++ ) {
                void *entry = page[j];
                if( ((uintptr_t)entry) > (uintptr_t)XLAT_LUT_ENTRY_USED ) {
                    xlat_cache_block_t block = XLAT_BLOCK_FOR_LUT_ENTRY(entry);
                    if( ptr >= block->code && ptr < block->code + block->size) {
                        /* Found it */
                        return (i<<13) | (j<<1);
                    }
                }
            }
        }
    }
    return -1;
}

/**
 * Sanity check that the given pointer is at least contained in one of the
 * cache regions, and has a sane-ish size. We don't do a full region walk atm.
 */
gboolean xlat_is_code_pointer( void *p )
{
    char *region;
    uintptr_t region_size;

    xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
    if( (((char *)block) - (char *)xlat_new_cache) < XLAT_NEW_CACHE_SIZE ) {
        /* Pointer is in new cache */
        region = (char *)xlat_new_cache;
        region_size = XLAT_NEW_CACHE_SIZE;
    }
#ifdef XLAT_GENERATIONAL_CACHE
    else if( (((char *)block) - (char *)xlat_temp_cache) < XLAT_TEMP_CACHE_SIZE ) {
        /* Pointer is in temp cache */
        region = (char *)xlat_temp_cache;
        region_size = XLAT_TEMP_CACHE_SIZE;
    } else if( (((char *)block) - (char *)xlat_old_cache) < XLAT_OLD_CACHE_SIZE ) {
        /* Pointer is in old cache */
        region = (char *)xlat_old_cache;
        region_size = XLAT_OLD_CACHE_SIZE;
    }
#endif
    else {
        /* Not a valid cache pointer */
        return FALSE;
    }

    /* Make sure the whole block is in the region */
    if( (((char *)p) - region) >= region_size ||
        (((char *)(NEXT(block))) - region) >= region_size )
        return FALSE;
    return TRUE;
}

void xlat_check_integrity( )
{
    xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
#endif
}
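
/* Profiling/statistics helpers: count the active blocks in the new cache,
 * collect them into an array, recover each block's SH4 PC by scanning the
 * LUT, and return the top blocks ordered by their 'active' field.
 */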
unsigned int xlat_get_active_block_count()
{
    unsigned int count = 0;
    xlat_cache_block_t ptr = xlat_new_cache;
    while( ptr->size != 0 ) {
        if( ptr->active != 0 ) {
            count++;
        }
        ptr = NEXT(ptr);
    }
    return count;
}

unsigned int xlat_get_active_blocks( struct xlat_block_ref *blocks, unsigned int size )
{
    unsigned int count = 0;
    xlat_cache_block_t ptr = xlat_new_cache;
    while( ptr->size != 0 ) {
        if( ptr->active != 0 ) {
            blocks[count].block = ptr;
            blocks[count].pc = 0;
            count++;
        }
        if( count >= size )
            break;
        ptr = NEXT(ptr);
    }
    return count;
}

static void xlat_get_block_pcs( struct xlat_block_ref *blocks, unsigned int size )
{
    unsigned i;
    for( i=0; i<XLAT_LUT_PAGES;i ++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( unsigned j=0; j < XLAT_LUT_PAGE_ENTRIES; j++ ) {
                void *code = XLAT_CODE_ADDR(page[j]);
                if( code != NULL ) {
                    xlat_cache_block_t ptr = XLAT_BLOCK_FOR_CODE(code);
                    sh4addr_t pc = XLAT_ADDR_FROM_ENTRY(i,j);
                    for( unsigned k=0; k<size; k++ ) {
                        if( blocks[k].block == ptr ) {
                            blocks[k].pc = pc;
                            ptr = ptr->chain;
                            if( ptr == NULL )
                                break;
                            else {
                                ptr = XLAT_BLOCK_FOR_CODE(ptr);
                                k = 0;
                            }
                        }
                    }
                }
            }
        }
    }
}

static int xlat_compare_active_field( const void *a, const void *b )
{
    const struct xlat_block_ref *ptra = (const struct xlat_block_ref *)a;
    const struct xlat_block_ref *ptrb = (const struct xlat_block_ref *)b;
    return ptrb->block->active - ptra->block->active;
}

unsigned int xlat_get_cache_blocks_by_activity( xlat_block_ref_t outblocks, size_t topN )
{
    int i=0;
    int count = xlat_get_active_block_count();

    struct xlat_block_ref blocks[count];
    xlat_get_active_blocks(blocks, count);
    xlat_get_block_pcs(blocks,count);
    qsort(blocks, count, sizeof(struct xlat_block_ref), xlat_compare_active_field);

    if( topN > count )
        topN = count;
    memcpy(outblocks, blocks, topN*sizeof(struct xlat_block_ref));
    return topN;
}