lxdream.org :: lxdream/src/xlat/xltcache.c
filename     src/xlat/xltcache.c
changeset    1214:49152b3d8b75
prev         1195:072131b61d2a
next         1263:b3de98d19faf
author       nkeynes
date         Fri Feb 24 17:31:18 2012 +1000 (12 years ago)
permissions  -rw-r--r--
last change  Add some real option processing to genglsl and let it accept multiple glsl input files (basically concatenate them together)
/**
 * $Id$
 *
 * Translation cache management. This part is architecture independent.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <assert.h>

#include "dreamcast.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "xlat/xltcache.h"
#include "x86dasm/x86dasm.h"

#define XLAT_LUT_PAGE_BITS 12
#define XLAT_LUT_TOTAL_BITS 28
#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)

#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))

#define XLAT_LUT_ENTRY_EMPTY (void *)0
#define XLAT_LUT_ENTRY_USED  (void *)1

#define XLAT_ADDR_FROM_ENTRY(pagenum,entrynum) ((((pagenum)&0xFFFF)<<13)|(((entrynum)<<1)&0x1FFE))

#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
#define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
#define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)
#define IS_ENTRY_CONTINUATION(ent) (((uintptr_t)ent) & ((uintptr_t)XLAT_LUT_ENTRY_USED))
#define IS_FIRST_ENTRY_IN_PAGE(addr) (((addr)&0x1FFE) == 0)
#define XLAT_CODE_ADDR(ent) ((void *)(((uintptr_t)ent) & (~((uintptr_t)0x03))))
#define XLAT_BLOCK_FOR_LUT_ENTRY(ent) XLAT_BLOCK_FOR_CODE(XLAT_CODE_ADDR(ent))
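
/* Illustrative sketch (not part of the original file): how the macros above
 * decompose an SH4 address, using the example address 0x0C0012A6:
 *
 *     XLAT_LUT_PAGE(0x0C0012A6)  == ((0x0C0012A6 >> 13) & 0xFFFF) == 0x6000
 *     XLAT_LUT_ENTRY(0x0C0012A6) == ((0x0C0012A6 & 0x1FFE) >> 1)  == 0x0953
 *     XLAT_ADDR_FROM_ENTRY(0x6000, 0x0953)                        == 0x0C0012A6
 *
 * Bits [28:13] of the address select one of the 64K LUT pages and bits [12:1]
 * select one of the 4096 word entries within that page, so the table covers a
 * 512MB region.  The low two bits of a stored entry are flags: bit 0
 * (XLAT_LUT_ENTRY_USED) marks the word as a continuation of a translated
 * block, and XLAT_CODE_ADDR() masks the flags off to recover the native code
 * pointer, which is assumed to be at least 4-byte aligned.
 */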

#define MIN_BLOCK_SIZE 32
#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)

#define BLOCK_INACTIVE 0
#define BLOCK_ACTIVE 1
#define BLOCK_USED 2

xlat_cache_block_t xlat_new_cache;
xlat_cache_block_t xlat_new_cache_ptr;
xlat_cache_block_t xlat_new_create_ptr;

#ifdef XLAT_GENERATIONAL_CACHE
xlat_cache_block_t xlat_temp_cache;
xlat_cache_block_t xlat_temp_cache_ptr;
xlat_cache_block_t xlat_old_cache;
xlat_cache_block_t xlat_old_cache_ptr;
#endif

static void **xlat_lut[XLAT_LUT_PAGES];
static gboolean xlat_initialized = FALSE;
static xlat_target_fns_t xlat_target = NULL;

void xlat_cache_init(void)
{
    if( !xlat_initialized ) {
        xlat_initialized = TRUE;
        xlat_new_cache = (xlat_cache_block_t)mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_new_cache_ptr = xlat_new_cache;
        xlat_new_create_ptr = xlat_new_cache;
#ifdef XLAT_GENERATIONAL_CACHE
        xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_temp_cache_ptr = xlat_temp_cache;
        xlat_old_cache_ptr = xlat_old_cache;
#endif
//        xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
//                MAP_PRIVATE|MAP_ANON, -1, 0);
        memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
    }
    xlat_flush_cache();
}

void xlat_set_target_fns( xlat_target_fns_t target )
{
    xlat_target = target;
}
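
/* Typical startup order (illustrative only; the contents of xlat_target_fns_t
 * are defined by the target backend, not in this file):
 *
 *     xlat_cache_init();                  // map the code cache(s), clear the LUT
 *     xlat_set_target_fns( &my_target );  // hypothetical backend descriptor
 *
 * Calling xlat_cache_init() a second time is safe: the xlat_initialized guard
 * skips the mmap()s, so the call degenerates to xlat_flush_cache().  The only
 * callback this file invokes through xlat_target is unlink_block(), used when
 * a block with a non-empty use_list is deleted.
 */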

/**
 * Reset the cache structure to its default state
 */
void xlat_flush_cache()
{
    xlat_cache_block_t tmp;
    int i;
    xlat_new_cache_ptr = xlat_new_cache;
    xlat_new_cache_ptr->active = 0;
    xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_new_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_temp_cache_ptr->active = 0;
    xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_temp_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_old_cache_ptr->active = 0;
    xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_old_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#endif
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        if( xlat_lut[i] != NULL ) {
            memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
        }
    }
}
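
/**
 * Mark the given block inactive and detach it from the lookup table: the LUT
 * entry it owns is redirected to the next block in its chain, and any blocks
 * that reference it (its use_list) are unlinked via the target backend.  The
 * block's storage itself is reclaimed later, when the allocator sweeps over it.
 */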
void xlat_delete_block( xlat_cache_block_t block )
{
    block->active = 0;
    *block->lut_entry = block->chain;
    if( block->use_list != NULL )
        xlat_target->unlink_block(block->use_list);
}

static void xlat_flush_page_by_lut( void **page )
{
    int i;
    for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
        if( IS_ENTRY_POINT(page[i]) ) {
            void *p = XLAT_CODE_ADDR(page[i]);
            do {
                xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
                xlat_delete_block(block);
                p = block->chain;
            } while( p != NULL );
        }
        page[i] = NULL;
    }
}
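
/**
 * Invalidation entry points, called when guest memory is written.  Flushing is
 * page-granular: if any LUT entry covering the written word(s) is in use, every
 * block in the containing 8KB page is deleted.  The entry == 0 special case
 * handles a block whose last word (e.g. a delay slot) spills into the first
 * word of the following page; in that case the previous page is flushed too.
 */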
void FASTCALL xlat_invalidate_word( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( entry == 0 && IS_ENTRY_CONTINUATION(page[entry]) ) {
            /* First entry may be a delay-slot for the previous page */
            xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(addr-2)]);
        }
        if( page[entry] != NULL ) {
            xlat_flush_page_by_lut(page);
        }
    }
}

void FASTCALL xlat_invalidate_long( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( entry == 0 && IS_ENTRY_CONTINUATION(page[entry]) ) {
            /* First entry may be a delay-slot for the previous page */
            xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(addr-2)]);
        }
        if( *(uint64_t *)&page[entry] != 0 ) {
            xlat_flush_page_by_lut(page);
        }
    }
}

void FASTCALL xlat_invalidate_block( sh4addr_t address, size_t size )
{
    int i;
    int entry_count = size >> 1; // words;
    uint32_t page_no = XLAT_LUT_PAGE(address);
    int entry = XLAT_LUT_ENTRY(address);

    if( entry == 0 && xlat_lut[page_no] != NULL && IS_ENTRY_CONTINUATION(xlat_lut[page_no][entry])) {
        /* First entry may be a delay-slot for the previous page */
        xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(address-2)]);
    }
    do {
        void **page = xlat_lut[page_no];
        int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
        if( entry_count < page_entries ) {
            page_entries = entry_count;
        }
        if( page != NULL ) {
            if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
                /* Overwriting the entire page anyway */
                xlat_flush_page_by_lut(page);
            } else {
                for( i=entry; i<entry+page_entries; i++ ) {
                    if( page[i] != NULL ) {
                        xlat_flush_page_by_lut(page);
                        break;
                    }
                }
            }
        }
        page_no ++;
        entry_count -= page_entries;
        entry = 0;
    } while( entry_count > 0 );
}

void FASTCALL xlat_flush_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        xlat_flush_page_by_lut(page);
    }
}

void * FASTCALL xlat_get_code( sh4addr_t address )
{
    void *result = NULL;
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        result = XLAT_CODE_ADDR(page[XLAT_LUT_ENTRY(address)]);
    }
    return result;
}
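
/* Illustrative lookup-or-translate flow from a frontend's point of view
 * (sketch only - the surrounding variables and the emit step are hypothetical,
 * not APIs defined in this file):
 *
 *     sh4addr_t pc = ...;                    // guest PC to execute
 *     void *code = xlat_get_code( pc );      // NULL if nothing is cached
 *     if( code == NULL ) {
 *         xlat_cache_block_t blk = xlat_start_block( pc );
 *         // ... emit native code into blk->code, growing it with
 *         // xlat_extend_block() if necessary, then xlat_commit_block() ...
 *         code = xlat_get_code( pc );
 *     }
 *
 * Since XLAT_CODE_ADDR() strips the flag bits from the LUT entry, a word that
 * is only marked as a continuation (value 1) also comes back as NULL here.
 */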

xlat_recovery_record_t xlat_get_pre_recovery( void *code, void *native_pc )
{
    if( code != NULL ) {
        uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
        xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code);
        uint32_t count = block->recover_table_size;
        xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
        uint32_t posn;
        for( posn = 1; posn < count; posn++ ) {
            if( records[posn].xlat_offset >= pc_offset ) {
                return &records[posn-1];
            }
        }
        return &records[count-1];
    }
    return NULL;
}

static void **xlat_get_lut_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];

    /* Add the LUT entry for the block */
    if( page == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] = page =
            (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( page, 0, XLAT_LUT_PAGE_SIZE );
    }

    return page;
}

void ** FASTCALL xlat_get_lut_entry( sh4addr_t address )
{
    void **page = xlat_get_lut_page(address);
    return &page[XLAT_LUT_ENTRY(address)];
}

uint32_t FASTCALL xlat_get_block_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    return xlt->size;
}

uint32_t FASTCALL xlat_get_code_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    if( xlt->recover_table_offset == 0 ) {
        return xlt->size;
    } else {
        return xlt->recover_table_offset;
    }
}

/**
 * Cut the specified block so that it has the given size, with the remaining data
 * forming a new free block. If the free block would be less than the minimum size,
 * the cut is not performed.
 * @return the next block after the (possibly cut) block.
 */
static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
{
    cutsize = (cutsize + 3) & 0xFFFFFFFC; // force word alignment
    assert( cutsize <= block->size );
    if( block->size > cutsize + MIN_TOTAL_SIZE ) {
        int oldsize = block->size;
        block->size = cutsize;
        xlat_cache_block_t next = NEXT(block);
        next->active = 0;
        next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
        return next;
    } else {
        return NEXT(block);
    }
}
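
/* Worked example for xlat_cut_block() (illustrative numbers): cutting a block
 * of size 1024 down to cutsize 61 first rounds 61 up to 64 for alignment.
 * Since 1024 > 64 + MIN_TOTAL_SIZE, the block is split: it keeps size 64, and
 * the remainder becomes an inactive free block of
 * 1024 - 64 - sizeof(struct xlat_cache_block) bytes immediately after it.  If
 * the leftover would be below MIN_TOTAL_SIZE, the block keeps its original
 * size and NEXT(block) is returned unchanged.
 */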

#ifdef XLAT_GENERATIONAL_CACHE
/**
 * Promote a block in temp space (or elsewhere for that matter) to old space.
 *
 * @param block to promote.
 */
static void xlat_promote_to_old_space( xlat_cache_block_t block )
{
    int allocation = (int)-sizeof(struct xlat_cache_block);
    int size = block->size;
    xlat_cache_block_t curr = xlat_old_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_old_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_old_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_old_cache_ptr->size == 0 ) {
        xlat_old_cache_ptr = xlat_old_cache;
    }
}

/**
 * Similarly to the above function, promotes a block to temp space.
 * TODO: Try to combine these - they're nearly identical
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    int size = block->size;
    int allocation = (int)-sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = xlat_temp_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        if( curr->active == BLOCK_USED ) {
            xlat_promote_to_old_space( curr );
        } else if( curr->active == BLOCK_ACTIVE ) {
            // Active but not used, release block
            *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03);
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_temp_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_temp_cache_ptr->size == 0 ) {
        xlat_temp_cache_ptr = xlat_temp_cache;
    }
}
#else
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    *block->lut_entry = block->chain;
    xlat_delete_block(block);
}
#endif
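
/* Summary of the eviction path implemented above: freshly translated code
 * always lives in the "new" cache.  When the allocator needs to overwrite a
 * still-active block there, xlat_promote_to_temp_space() is called.  With
 * XLAT_GENERATIONAL_CACHE defined, the block is copied into the temp cache,
 * and temp-cache blocks that have been marked BLOCK_USED are copied again into
 * the old cache when they are about to be overwritten; without the generational
 * cache, "promotion" simply deletes the block so it will be retranslated on its
 * next use.
 */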

/**
 * Returns the next block in the new cache list that can be written to by the
 * translator. If the next block is active, it is evicted first.
 */
xlat_cache_block_t xlat_start_block( sh4addr_t address )
{
    if( xlat_new_cache_ptr->size == 0 ) {
        xlat_new_cache_ptr = xlat_new_cache;
    }

    if( xlat_new_cache_ptr->active ) {
        xlat_promote_to_temp_space( xlat_new_cache_ptr );
    }
    xlat_new_create_ptr = xlat_new_cache_ptr;
    xlat_new_create_ptr->active = 1;
    xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);

    /* Add the LUT entry for the block */
    void **p = xlat_get_lut_entry(address);
    void *entry = *p;
    if( IS_ENTRY_POINT(entry) ) {
        xlat_cache_block_t oldblock = XLAT_BLOCK_FOR_LUT_ENTRY(entry);
        assert( oldblock->active );
        xlat_new_create_ptr->chain = XLAT_CODE_ADDR(entry);
    } else {
        xlat_new_create_ptr->chain = NULL;
    }
    xlat_new_create_ptr->use_list = NULL;

    *p = &xlat_new_create_ptr->code;
    if( IS_ENTRY_CONTINUATION(entry) ) {
        *((uintptr_t *)p) |= (uintptr_t)XLAT_LUT_ENTRY_USED;
    }
    xlat_new_create_ptr->lut_entry = p;

    return xlat_new_create_ptr;
}

xlat_cache_block_t xlat_extend_block( uint32_t newSize )
{
    assert( xlat_new_create_ptr->use_list == NULL );
    while( xlat_new_create_ptr->size < newSize ) {
        if( xlat_new_cache_ptr->size == 0 ) {
            /* Migrate to the front of the cache to keep it contiguous */
            xlat_new_create_ptr->active = 0;
            sh4ptr_t olddata = xlat_new_create_ptr->code;
            int oldsize = xlat_new_create_ptr->size;
            int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
            void **lut_entry = xlat_new_create_ptr->lut_entry;
            void *chain = xlat_new_create_ptr->chain;
            int allocation = (int)-sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = xlat_new_cache;
            do {
                if( xlat_new_cache_ptr->active ) {
                    xlat_promote_to_temp_space( xlat_new_cache_ptr );
                }
                allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
                xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
            } while( allocation < size );
            xlat_new_create_ptr = xlat_new_cache;
            xlat_new_create_ptr->active = 1;
            xlat_new_create_ptr->size = allocation;
            xlat_new_create_ptr->lut_entry = lut_entry;
            xlat_new_create_ptr->chain = chain;
            xlat_new_create_ptr->use_list = NULL;
            *lut_entry = &xlat_new_create_ptr->code;
            memmove( xlat_new_create_ptr->code, olddata, oldsize );
        } else {
            if( xlat_new_cache_ptr->active ) {
                xlat_promote_to_temp_space( xlat_new_cache_ptr );
            }
            xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
        }
    }
    return xlat_new_create_ptr;
}

void xlat_commit_block( uint32_t destsize, sh4addr_t startpc, sh4addr_t endpc )
{
    void **entry = xlat_get_lut_entry(startpc+2);
    /* assume main entry has already been set at this point */

    for( sh4addr_t pc = startpc+2; pc < endpc; pc += 2 ) {
        if( XLAT_LUT_ENTRY(pc) == 0 )
            entry = xlat_get_lut_entry(pc);
        *((uintptr_t *)entry) |= (uintptr_t)XLAT_LUT_ENTRY_USED;
        entry++;
    }

    xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
}
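
/* Illustrative end-of-translation sequence (sketch only; emit_code() is a
 * hypothetical emitter, not an API defined in this file):
 *
 *     xlat_cache_block_t blk = xlat_start_block( startpc );
 *     uint32_t destsize = emit_code( blk );      // bytes actually emitted
 *     xlat_commit_block( destsize, startpc, endpc );
 *
 * xlat_commit_block() walks every second byte of [startpc+2, endpc) and ORs
 * XLAT_LUT_ENTRY_USED into the corresponding LUT words, so a write anywhere
 * inside the translated range - not just at its first instruction - will
 * invalidate the block.  It then cuts the creation block down to destsize,
 * leaving the remainder as the next free block.
 */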

void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
{
    int foundptr = 0;
    xlat_cache_block_t tail =
        (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));

    assert( tail->active == 1 );
    assert( tail->size == 0 );
    while( cache < tail ) {
        assert( cache->active >= 0 && cache->active <= 2 );
        assert( cache->size >= 0 && cache->size < size );
        if( cache == ptr ) {
            foundptr = 1;
        }
        cache = NEXT(cache);
    }
    assert( cache == tail );
    assert( foundptr == 1 || tail == ptr );
}

/**
 * Perform a reverse lookup to determine the SH4 address corresponding to
 * the start of the code block containing ptr. This is _slow_ - it does a
 * linear scan of the lookup table to find this.
 *
 * If the pointer cannot be found in any live block, returns -1 (as this
 * is not a legal PC)
 */
sh4addr_t xlat_get_address( unsigned char *ptr )
{
    int i,j;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( j=0; j<XLAT_LUT_PAGE_ENTRIES; j++ ) {
                void *entry = page[j];
                if( ((uintptr_t)entry) > (uintptr_t)XLAT_LUT_ENTRY_USED ) {
                    xlat_cache_block_t block = XLAT_BLOCK_FOR_LUT_ENTRY(entry);
                    if( ptr >= block->code && ptr < block->code + block->size) {
                        /* Found it */
                        return (i<<13) | (j<<1);
                    }
                }
            }
        }
    }
    return -1;
}

/**
 * Sanity check that the given pointer is at least contained in one of the cache
 * regions, and has a sane-ish size. We don't do a full region walk atm.
 */
gboolean xlat_is_code_pointer( void *p )
{
    char *region;
    uintptr_t region_size;

    xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
    if( (((char *)block) - (char *)xlat_new_cache) < XLAT_NEW_CACHE_SIZE ) {
        /* Pointer is in new cache */
        region = (char *)xlat_new_cache;
        region_size = XLAT_NEW_CACHE_SIZE;
    }
#ifdef XLAT_GENERATIONAL_CACHE
    else if( (((char *)block) - (char *)xlat_temp_cache) < XLAT_TEMP_CACHE_SIZE ) {
        /* Pointer is in temp cache */
        region = (char *)xlat_temp_cache;
        region_size = XLAT_TEMP_CACHE_SIZE;
    } else if( (((char *)block) - (char *)xlat_old_cache) < XLAT_OLD_CACHE_SIZE ) {
        /* Pointer is in old cache */
        region = (char *)xlat_old_cache;
        region_size = XLAT_OLD_CACHE_SIZE;
    }
#endif
    else {
        /* Not a valid cache pointer */
        return FALSE;
    }

    /* Make sure the whole block is in the region */
    if( (((char *)p) - region) >= region_size ||
        (((char *)(NEXT(block))) - region) >= region_size )
        return FALSE;
    return TRUE;
}

void xlat_check_integrity( )
{
    xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
#endif
}

unsigned int xlat_get_active_block_count()
{
    unsigned int count = 0;
    xlat_cache_block_t ptr = xlat_new_cache;
    while( ptr->size != 0 ) {
        if( ptr->active != 0 ) {
            count++;
        }
        ptr = NEXT(ptr);
    }
    return count;
}

unsigned int xlat_get_active_blocks( struct xlat_block_ref *blocks, unsigned int size )
{
    unsigned int count = 0;
    xlat_cache_block_t ptr = xlat_new_cache;
    while( ptr->size != 0 ) {
        if( ptr->active != 0 ) {
            blocks[count].block = ptr;
            blocks[count].pc = 0;
            count++;
        }
        if( count >= size )
            break;
        ptr = NEXT(ptr);
    }
    return count;
}

static void xlat_get_block_pcs( struct xlat_block_ref *blocks, unsigned int size )
{
    unsigned i;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( unsigned j=0; j < XLAT_LUT_PAGE_ENTRIES; j++ ) {
                void *code = XLAT_CODE_ADDR(page[j]);
                if( code != NULL ) {
                    xlat_cache_block_t ptr = XLAT_BLOCK_FOR_CODE(code);
                    sh4addr_t pc = XLAT_ADDR_FROM_ENTRY(i,j);
                    for( unsigned k=0; k<size; k++ ) {
                        if( blocks[k].block == ptr ) {
                            blocks[k].pc = pc;
                            ptr = ptr->chain;
                            if( ptr == NULL )
                                break;
                            else {
                                ptr = XLAT_BLOCK_FOR_CODE(ptr);
                                k = 0;
                            }
                        }
                    }
                }
            }
        }
    }
}

static int xlat_compare_active_field( const void *a, const void *b )
{
    const struct xlat_block_ref *ptra = (const struct xlat_block_ref *)a;
    const struct xlat_block_ref *ptrb = (const struct xlat_block_ref *)b;
    return ptrb->block->active - ptra->block->active;
}

unsigned int xlat_get_cache_blocks_by_activity( xlat_block_ref_t outblocks, size_t topN )
{
    int i=0;
    int count = xlat_get_active_block_count();

    struct xlat_block_ref blocks[count];
    xlat_get_active_blocks(blocks, count);
    xlat_get_block_pcs(blocks,count);
    qsort(blocks, count, sizeof(struct xlat_block_ref), xlat_compare_active_field);

    if( topN > count )
        topN = count;
    memcpy(outblocks, blocks, topN*sizeof(struct xlat_block_ref));
    return topN;
}
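
/* Illustrative profiling use of the block-activity API (sketch only; the
 * array size of 16 is an arbitrary caller-side choice):
 *
 *     struct xlat_block_ref top[16];
 *     unsigned int n = xlat_get_cache_blocks_by_activity( top, 16 );
 *     for( unsigned int i = 0; i < n; i++ ) {
 *         // top[i].pc is the SH4 address of the block (0 if it could not be
 *         // matched to a LUT entry); top[i].block->active is its activity value
 *     }
 *
 * Results are sorted most-active first by xlat_compare_active_field().
 */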