lxdream.org :: lxdream/src/sh4/xltcache.c
filename src/sh4/xltcache.c
changeset 938:e377bd827c54
prev 935:45246788ca00
next 950:cc1e88104360
author nkeynes
date Sat Jan 03 03:30:26 2009 +0000
branch lxdream-mem
permissions -rw-r--r--
last change MMU work-in-progress
* Move SDRAM out into separate sdram.c
* Move all page-table management into mmu.c
* Convert UTLB management to use the new page-tables
* Rip out all calls to mmu_vma_to_phys_* and replace with direct access

/**
 * $Id$
 *
 * Translation cache management. This part is architecture independent.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>   /* memset, memcpy, memmove */
#include <assert.h>

#include "dreamcast.h"
#include "sh4/sh4core.h"
#include "sh4/xltcache.h"
#include "x86dasm/x86dasm.h"

#define XLAT_LUT_PAGE_BITS 12
#define XLAT_LUT_TOTAL_BITS 28
#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)

#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))

#define XLAT_LUT_ENTRY_EMPTY (void *)0
#define XLAT_LUT_ENTRY_USED  (void *)1

#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
#define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
#define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)

#define MIN_BLOCK_SIZE 32
#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)

#define BLOCK_INACTIVE 0
#define BLOCK_ACTIVE 1
#define BLOCK_USED 2

xlat_cache_block_t xlat_new_cache;
xlat_cache_block_t xlat_new_cache_ptr;
xlat_cache_block_t xlat_new_create_ptr;

#ifdef XLAT_GENERATIONAL_CACHE
xlat_cache_block_t xlat_temp_cache;
xlat_cache_block_t xlat_temp_cache_ptr;
xlat_cache_block_t xlat_old_cache;
xlat_cache_block_t xlat_old_cache_ptr;
#endif

static void **xlat_lut[XLAT_LUT_PAGES];
static gboolean xlat_initialized = FALSE;
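
/**
 * One-time allocation of the translation cache(s) as executable memory,
 * followed by a flush to establish the empty-cache invariants. Guarded so
 * that repeated calls only flush.
 */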
void xlat_cache_init(void)
{
    if( !xlat_initialized ) {
        xlat_initialized = TRUE;
        xlat_new_cache = mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_new_cache_ptr = xlat_new_cache;
        xlat_new_create_ptr = xlat_new_cache;
#ifdef XLAT_GENERATIONAL_CACHE
        xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_temp_cache_ptr = xlat_temp_cache;
        xlat_old_cache_ptr = xlat_old_cache;
#endif
//        xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
//                MAP_PRIVATE|MAP_ANON, -1, 0);
        memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
    }
    xlat_flush_cache();
}

/**
 * Reset the cache structure to its default state
 */
void xlat_flush_cache()
{
    xlat_cache_block_t tmp;
    int i;
    xlat_new_cache_ptr = xlat_new_cache;
    xlat_new_cache_ptr->active = 0;
    xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_new_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_temp_cache_ptr->active = 0;
    xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_temp_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_old_cache_ptr->active = 0;
    xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_old_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#endif
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        if( xlat_lut[i] != NULL ) {
            memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
        }
    }
}
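
/**
 * Deactivate every block with an entry point in the given LUT page, and
 * clear all entries in the page.
 */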
static void xlat_flush_page_by_lut( void **page )
{
    int i;
    for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
        if( IS_ENTRY_POINT(page[i]) ) {
            XLAT_BLOCK_FOR_CODE(page[i])->active = 0;
        }
        page[i] = NULL;
    }
}
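
/**
 * Invalidate any translation unit containing the 16-bit word at addr
 * (e.g. after a write to that word).
 */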
void FASTCALL xlat_invalidate_word( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( page[entry] != NULL ) {
            xlat_flush_page_by_lut(page);
        }
    }
}
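
/**
 * Invalidate any translation unit touched by a 32-bit write at addr, which
 * spans two consecutive LUT entries.
 */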
void FASTCALL xlat_invalidate_long( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        /* Check both word entries explicitly - the old single 64-bit read only
         * covered both entries on hosts with 32-bit pointers. */
        if( page[entry] != NULL || page[entry+1] != NULL ) {
            xlat_flush_page_by_lut(page);
        }
    }
}
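
/**
 * Invalidate all translations overlapping the byte range [address,
 * address+size), walking the LUT one page at a time.
 */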
void FASTCALL xlat_invalidate_block( sh4addr_t address, size_t size )
{
    int i;
    int entry_count = size >> 1; // words
    uint32_t page_no = XLAT_LUT_PAGE(address);
    int entry = XLAT_LUT_ENTRY(address);
    do {
        void **page = xlat_lut[page_no];
        int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
        if( entry_count < page_entries ) {
            page_entries = entry_count;
        }
        if( page != NULL ) {
            if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
                /* Overwriting the entire page anyway */
                xlat_flush_page_by_lut(page);
            } else {
                for( i=entry; i<entry+page_entries; i++ ) {
                    if( page[i] != NULL ) {
                        xlat_flush_page_by_lut(page);
                        break;
                    }
                }
            }
        }
        page_no ++;
        entry_count -= page_entries; /* decrement once per page, mapped or not */
        entry = 0;
    } while( entry_count > 0 );
}
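
/**
 * Invalidate every translation in the 8KB LUT page containing the given
 * address.
 */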
void FASTCALL xlat_flush_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        xlat_flush_page_by_lut(page);
    }
}
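
/**
 * Return the translated entry point for the given SH4 address, or NULL if
 * none exists. The low 2 bits of the LUT entry are flag bits (e.g.
 * XLAT_LUT_ENTRY_USED) and are masked off.
 */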
void * FASTCALL xlat_get_code( sh4addr_t address )
{
    void *result = NULL;
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        result = (void *)(((uintptr_t)(page[XLAT_LUT_ENTRY(address)])) & (~((uintptr_t)0x03)));
    }
    return result;
}
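
/**
 * Return the first recovery record at or after native_pc within the given
 * code block (i.e. the record whose xlat_offset is >= the offset of
 * native_pc), optionally considering the terminal record. Returns NULL if
 * native_pc lies beyond the last considered record.
 */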
xlat_recovery_record_t xlat_get_post_recovery( void *code, void *native_pc, gboolean with_terminal )
{
    if( code != NULL ) {
        uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
        xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code);
        uint32_t count = block->recover_table_size;
        xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
        uint32_t posn;
        if( count > 0 && !with_terminal )
            count--;
        if( count == 0 ) { /* No usable records - guard against underflow below */
            return NULL;
        }
        if( records[count-1].xlat_offset < pc_offset ) {
            return NULL;
        }
        for( posn=count-1; posn > 0; posn-- ) {
            if( records[posn-1].xlat_offset < pc_offset ) {
                return &records[posn];
            }
        }
        return &records[0]; // shouldn't happen
    }
    return NULL;
}
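
/**
 * Return the last recovery record strictly before native_pc within the
 * given code block, i.e. the most recent recovery point preceding it.
 */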
xlat_recovery_record_t xlat_get_pre_recovery( void *code, void *native_pc )
{
    if( code != NULL ) {
        uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
        xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code);
        uint32_t count = block->recover_table_size;
        xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
        uint32_t posn;
        if( count == 0 ) { /* No records - guard against underflow below */
            return NULL;
        }
        for( posn = 1; posn < count; posn++ ) {
            if( records[posn].xlat_offset >= pc_offset ) {
                return &records[posn-1];
            }
        }
        return &records[count-1];
    }
    return NULL;
}
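
/**
 * Return a pointer to the LUT entry for the given SH4 address, allocating
 * and zeroing the containing LUT page on demand.
 */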
void ** FASTCALL xlat_get_lut_entry( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];

    /* Add the LUT entry for the block */
    if( page == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] = page =
            mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( page, 0, XLAT_LUT_PAGE_SIZE );
    }

    return &page[XLAT_LUT_ENTRY(address)];
}
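
/**
 * Return the total data size of the block whose code begins at the given
 * pointer (excluding the block header).
 */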
uint32_t FASTCALL xlat_get_block_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    return xlt->size;
}
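
/**
 * Return the executable code size of the block: the full block size unless
 * a recovery table is present, in which case the code ends where the
 * recovery table begins.
 */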
uint32_t FASTCALL xlat_get_code_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    if( xlt->recover_table_offset == 0 ) {
        return xlt->size;
    } else {
        return xlt->recover_table_offset;
    }
}

/**
 * Cut the specified block so that it has the given size, with the remaining data
 * forming a new free block. If the free block would be less than the minimum size,
 * the cut is not performed.
 * @return the next block after the (possibly cut) block.
 */
static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
{
    cutsize = (cutsize + 3) & 0xFFFFFFFC; /* force 4-byte alignment */
    assert( cutsize <= block->size );
    if( block->size > cutsize + MIN_TOTAL_SIZE ) {
        int oldsize = block->size;
        block->size = cutsize;
        xlat_cache_block_t next = NEXT(block);
        next->active = 0;
        next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
        return next;
    } else {
        return NEXT(block);
    }
}

#ifdef XLAT_GENERATIONAL_CACHE
/**
 * Promote a block in temp space (or elsewhere for that matter) to old space.
 *
 * @param block block to promote.
 */
static void xlat_promote_to_old_space( xlat_cache_block_t block )
{
    int allocation = (int)-sizeof(struct xlat_cache_block);
    int size = block->size;
    xlat_cache_block_t curr = xlat_old_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_old_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_old_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_old_cache_ptr->size == 0 ) {
        xlat_old_cache_ptr = xlat_old_cache;
    }
}

/**
 * Similarly to the above function, promotes a block to temp space.
 * TODO: Try to combine these - they're nearly identical
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    int size = block->size;
    int allocation = (int)-sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = xlat_temp_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        if( curr->active == BLOCK_USED ) {
            xlat_promote_to_old_space( curr );
        } else if( curr->active == BLOCK_ACTIVE ) {
            // Active but not used, release block
            *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03);
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_temp_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_temp_cache_ptr->size == 0 ) {
        xlat_temp_cache_ptr = xlat_temp_cache;
    }
}
#else
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    *block->lut_entry = 0;
}
#endif

/**
 * Returns the next block in the new cache list that can be written to by the
 * translator. If the next block is active, it is evicted first.
 */
xlat_cache_block_t xlat_start_block( sh4addr_t address )
{
    if( xlat_new_cache_ptr->size == 0 ) {
        xlat_new_cache_ptr = xlat_new_cache;
    }

    if( xlat_new_cache_ptr->active ) {
        xlat_promote_to_temp_space( xlat_new_cache_ptr );
    }
    xlat_new_create_ptr = xlat_new_cache_ptr;
    xlat_new_create_ptr->active = 1;
    xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);

    /* Add the LUT entry for the block */
    if( xlat_lut[XLAT_LUT_PAGE(address)] == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] =
            mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( xlat_lut[XLAT_LUT_PAGE(address)], 0, XLAT_LUT_PAGE_SIZE );
    }

    if( IS_ENTRY_POINT(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]) ) {
        xlat_cache_block_t oldblock = XLAT_BLOCK_FOR_CODE(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]);
        oldblock->active = 0;
    }

    xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)] =
        &xlat_new_create_ptr->code;
    xlat_new_create_ptr->lut_entry = xlat_lut[XLAT_LUT_PAGE(address)] + XLAT_LUT_ENTRY(address);

    return xlat_new_create_ptr;
}
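
/**
 * Grow the block currently under construction to at least newSize bytes by
 * absorbing the following blocks (evicting any active ones), or by moving
 * the block to the front of the cache when the end sentinel is reached.
 */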
xlat_cache_block_t xlat_extend_block( uint32_t newSize )
{
    while( xlat_new_create_ptr->size < newSize ) {
        if( xlat_new_cache_ptr->size == 0 ) {
            /* Migrate to the front of the cache to keep it contiguous */
            xlat_new_create_ptr->active = 0;
            sh4ptr_t olddata = xlat_new_create_ptr->code;
            int oldsize = xlat_new_create_ptr->size;
            int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
            void **lut_entry = xlat_new_create_ptr->lut_entry;
            int allocation = (int)-sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = xlat_new_cache;
            do {
                if( xlat_new_cache_ptr->active ) {
                    xlat_promote_to_temp_space( xlat_new_cache_ptr );
                }
                allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
                xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
            } while( allocation < size );
            xlat_new_create_ptr = xlat_new_cache;
            xlat_new_create_ptr->active = 1;
            xlat_new_create_ptr->size = allocation;
            xlat_new_create_ptr->lut_entry = lut_entry;
            *lut_entry = &xlat_new_create_ptr->code;
            memmove( xlat_new_create_ptr->code, olddata, oldsize );
        } else {
            if( xlat_new_cache_ptr->active ) {
                xlat_promote_to_temp_space( xlat_new_cache_ptr );
            }
            xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
        }
    }
    return xlat_new_create_ptr;
}
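
/**
 * Finalise the block under construction: mark every LUT entry spanned by
 * the source region as used, then trim the block to its final size and
 * return the remainder to the free pool.
 */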
void xlat_commit_block( uint32_t destsize, uint32_t srcsize )
{
    void **ptr = xlat_new_create_ptr->lut_entry;
    void **endptr = ptr + (srcsize>>1); /* one LUT entry per 16-bit SH4 word */
    while( ptr < endptr ) {
        if( *ptr == NULL ) {
            *ptr = XLAT_LUT_ENTRY_USED;
        }
        ptr++;
    }

    xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
}
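
/**
 * Deactivate a block and clear its LUT entry so it can no longer be
 * entered.
 */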
void xlat_delete_block( xlat_cache_block_t block )
{
    block->active = 0;
    *block->lut_entry = NULL;
}
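
/**
 * Consistency check for one cache region: walks the block chain validating
 * header fields up to the end-of-cache sentinel, and asserts that ptr
 * refers to a block in the chain (or to the sentinel itself).
 */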
void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
{
    int foundptr = 0;
    xlat_cache_block_t tail =
        (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));

    assert( tail->active == 1 );
    assert( tail->size == 0 );
    while( cache < tail ) {
        assert( cache->active >= 0 && cache->active <= 2 );
        assert( cache->size >= 0 && cache->size < size );
        if( cache == ptr ) {
            foundptr = 1;
        }
        cache = NEXT(cache);
    }
    assert( cache == tail );
    assert( foundptr == 1 || tail == ptr );
}

void xlat_check_integrity( )
{
    xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
#endif
}