lxdream.org :: lxdream/src/sh4/xltcache.c :: diff
filename src/sh4/xltcache.c
changeset 359:c588dce7ebde
next 366:6fb0d05152d7
author nkeynes
date Tue Aug 28 08:46:14 2007 +0000
permissions -rw-r--r--
last change Translator WIP: fill out and correct another batch of instructions
1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/src/sh4/xltcache.c Tue Aug 28 08:46:14 2007 +0000
1.3 @@ -0,0 +1,347 @@
1.4 +/**
1.5 + * $Id: xltcache.c,v 1.1 2007-08-23 12:33:27 nkeynes Exp $
1.6 + *
1.7 + * Translation cache management. This part is architecture independent.
1.8 + *
1.9 + * Copyright (c) 2005 Nathan Keynes.
1.10 + *
1.11 + * This program is free software; you can redistribute it and/or modify
1.12 + * it under the terms of the GNU General Public License as published by
1.13 + * the Free Software Foundation; either version 2 of the License, or
1.14 + * (at your option) any later version.
1.15 + *
1.16 + * This program is distributed in the hope that it will be useful,
1.17 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1.18 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1.19 + * GNU General Public License for more details.
1.20 + */
1.21 +
1.22 +#include "sh4/xltcache.h"
1.23 +#include "dreamcast.h"
1.24 +#include <sys/mman.h>
1.25 +#include <assert.h>
1.26 +
1.27 +#define XLAT_LUT_PAGE_BITS 12
1.28 +#define XLAT_LUT_TOTAL_BITS 28
1.29 +#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
1.30 +#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)
1.31 +
1.32 +#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
1.33 +#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
1.34 +#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))
1.35 +
1.36 +#define XLAT_LUT_ENTRY_EMPTY (void *)0
1.37 +#define XLAT_LUT_ENTRY_USED (void *)1
1.38 +
1.39 +#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
1.40 +#define BLOCK_FOR_CODE(code) (((xlat_cache_block_t)code)-1)
1.41 +#define IS_ENTRY_POINT(ent) ((ent) > XLAT_LUT_ENTRY_USED)
1.42 +#define IS_ENTRY_USED(ent) ((ent) != XLAT_LUT_ENTRY_EMPTY)
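As a quick check of the mapping macros above, here is a tiny worked example; the address 0x8C0010A6 is purely illustrative and appears nowhere in the code:

    /* Each LUT page covers 8KB of SH4 address space, one entry per 2-byte
     * instruction slot, so the split works out as follows. */
    sh4addr_t addr = 0x8C0010A6;          /* illustrative address only */
    int page  = XLAT_LUT_PAGE(addr);      /* (addr>>13) & 0xFFFF = 0x6000 */
    int entry = XLAT_LUT_ENTRY(addr);     /* (0x10A6 & 0x1FFE) >> 1 = 0x853 */
    void *code = xlat_lut[page] ? xlat_lut[page][entry] : NULL;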
1.43 +
1.44 +#define MIN_BLOCK_SIZE 32
1.45 +#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)
1.46 +
1.47 +#define BLOCK_INACTIVE 0
1.48 +#define BLOCK_ACTIVE 1
1.49 +#define BLOCK_USED 2
1.50 +
1.51 +xlat_cache_block_t xlat_new_cache;
1.52 +xlat_cache_block_t xlat_new_cache_ptr;
1.53 +xlat_cache_block_t xlat_new_create_ptr;
1.54 +xlat_cache_block_t xlat_temp_cache;
1.55 +xlat_cache_block_t xlat_temp_cache_ptr;
1.56 +xlat_cache_block_t xlat_old_cache;
1.57 +xlat_cache_block_t xlat_old_cache_ptr;
1.58 +static void ***xlat_lut;
1.59 +static void **xlat_lut2; /* second-tier page info */
1.60 +
1.61 +void xlat_cache_init()
1.62 +{
1.63 + xlat_new_cache = mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
1.64 + MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
1.65 + xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
1.66 + MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
1.67 + xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
1.68 + MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
1.69 + xlat_new_cache_ptr = xlat_new_cache;
1.70 + xlat_temp_cache_ptr = xlat_temp_cache;
1.71 + xlat_old_cache_ptr = xlat_old_cache;
1.72 + xlat_new_create_ptr = xlat_new_cache;
1.73 +
1.74 + xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
1.75 + MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
1.76 + memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
1.77 +
1.78 + xlat_flush_cache();
1.79 +}
1.80 +
1.81 +/**
1.82 + * Reset the cache structure to its default state
1.83 + */
1.84 +void xlat_flush_cache()
1.85 +{
1.86 + xlat_cache_block_t tmp;
1.87 + int i;
1.88 + xlat_new_cache_ptr = xlat_new_cache;
1.89 + xlat_new_cache_ptr->active = 0;
1.90 + xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
1.91 + tmp = NEXT(xlat_new_cache_ptr);
1.92 + tmp->active = 1;
1.93 + tmp->size = 0;
1.94 + xlat_temp_cache_ptr = xlat_temp_cache;
1.95 + xlat_temp_cache_ptr->active = 0;
1.96 + xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
1.97 + tmp = NEXT(xlat_temp_cache_ptr);
1.98 + tmp->active = 1;
1.99 + tmp->size = 0;
1.100 + xlat_old_cache_ptr = xlat_old_cache;
1.101 + xlat_old_cache_ptr->active = 0;
1.102 + xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
1.103 + tmp = NEXT(xlat_old_cache_ptr);
1.104 + tmp->active = 1;
1.105 + tmp->size = 0;
1.106 + for( i=0; i<XLAT_LUT_PAGES; i++ ) {
1.107 + if( xlat_lut[i] != NULL ) {
1.108 + memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
1.109 + }
1.110 + }
1.111 +}
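For reference, this is the layout each cache is left in by xlat_flush_cache(); nothing here beyond what the code above sets up:

    /*   [ header | free payload of SIZE - 2*sizeof(struct xlat_cache_block) bytes ]
     *   [ header | size == 0, active == 1 ]   <- end-of-cache sentinel
     *
     * The zero-size sentinel is what the allocation loops further down detect
     * with curr->size == 0 before wrapping back to the start of the cache. */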
1.112 +
1.113 +void xlat_flush_page( sh4addr_t address )
1.114 +{
1.115 + int i;
1.116 + void **page = xlat_lut[XLAT_LUT_PAGE(address)]; if( page == NULL ) return;
1.117 + for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
1.118 + if( IS_ENTRY_POINT(page[i]) ) {
1.119 + BLOCK_FOR_CODE(page[i])->active = 0;
1.120 + }
1.121 + page[i] = NULL;
1.122 + }
1.123 +}
1.124 +
1.125 +void *xlat_get_code( sh4addr_t address )
1.126 +{
1.127 + void **page = xlat_lut[XLAT_LUT_PAGE(address)];
1.128 + if( page == NULL ) {
1.129 + return NULL;
1.130 + }
1.131 + return page[XLAT_LUT_ENTRY(address)];
1.132 +}
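For context, a minimal sketch of how a dispatcher might use xlat_get_code(); the names sh4_get_or_translate() and translate_basic_block() are hypothetical stand-ins, not definitions from this changeset:

    void *translate_basic_block( sh4addr_t pc );   /* hypothetical front end */

    /* Hypothetical dispatch step: reuse translated code if the LUT holds a real
     * entry point, otherwise translate the block first. */
    void *sh4_get_or_translate( sh4addr_t pc )
    {
        void *code = xlat_get_code( pc );
        if( !IS_ENTRY_POINT(code) ) {              /* NULL or the USED sentinel */
            code = translate_basic_block( pc );
        }
        return code;                               /* caller jumps into this */
    }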
1.133 +
1.134 +/**
1.135 + * Cut the specified block so that it has the given size, with the remaining data
1.136 + * forming a new free block. If the free block would be less than the minimum size,
1.137 + * the cut is not performed.
1.138 + * @return the next block after the (possibly cut) block.
1.139 + */
1.140 +static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
1.141 +{
1.142 + if( block->size > cutsize + MIN_TOTAL_SIZE ) {
1.143 + int oldsize = block->size;
1.144 + block->size = cutsize;
1.145 + xlat_cache_block_t next = NEXT(block);
1.146 + next->active = 0;
1.147 + next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
1.148 + return next;
1.149 + } else {
1.150 + return NEXT(block);
1.151 + }
1.152 +}
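A worked example of the cut logic, assuming (purely for illustration) a 32-byte struct xlat_cache_block header:

    /* Cutting a 200-byte block down to 96 bytes:
     *   condition: 200 > 96 + MIN_TOTAL_SIZE (96 + 32 + 32 = 160), so the cut happens;
     *   block->size becomes 96 and the new free block gets 200 - 96 - 32 = 72 bytes.
     * Cutting the same block to 160 bytes would leave only 8 bytes of payload,
     * below MIN_BLOCK_SIZE, so the block keeps its full 200 bytes instead. */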
1.153 +
1.154 +/**
1.155 + * Promote a block in temp space (or elsewhere for that matter) to old space.
1.156 + *
1.157 + * @param block The block to promote.
1.158 + */
1.159 +static void xlat_promote_to_old_space( xlat_cache_block_t block )
1.160 +{
1.161 + int allocation = -sizeof(struct xlat_cache_block);
1.162 + int size = block->size;
1.163 + xlat_cache_block_t curr = xlat_old_cache_ptr;
1.164 + xlat_cache_block_t start_block = curr;
1.165 + do {
1.166 + allocation += curr->size + sizeof(struct xlat_cache_block);
1.167 + curr = NEXT(curr);
1.168 + if( allocation > size ) {
1.169 + break; /* done */
1.170 + }
1.171 + if( curr->size == 0 ) { /* End-of-cache Sentinel */
1.172 + /* Leave what we just released as free space and start again from the
1.173 + * top of the cache
1.174 + */
1.175 + start_block->active = 0;
1.176 + start_block->size = allocation;
1.177 + allocation = -sizeof(struct xlat_cache_block);
1.178 + start_block = curr = xlat_old_cache;
1.179 + }
1.180 + } while(1);
1.181 + start_block->active = 1;
1.182 + start_block->size = allocation;
1.183 + start_block->lut_entry = block->lut_entry;
1.184 + *block->lut_entry = &start_block->code;
1.185 + memcpy( start_block->code, block->code, block->size );
1.186 + xlat_old_cache_ptr = xlat_cut_block(start_block, size );
1.187 + if( xlat_old_cache_ptr->size == 0 ) {
1.188 + xlat_old_cache_ptr = xlat_old_cache;
1.189 + }
1.190 +}
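A note on the accounting above, since the -sizeof(struct xlat_cache_block) starting value is easy to misread:

    /* Absorbing blocks b1..bn starting at start_block yields
     *   allocation = size(b1) + size(b2) + ... + size(bn) + (n-1) headers,
     * i.e. every byte from start_block->code up to the header of the block
     * following bn; one header is subtracted up front because start_block keeps
     * its own header. The loop stops once allocation exceeds the code size
     * being copied, so the memcpy below is guaranteed to fit. */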
1.191 +
1.192 +/**
1.193 + * As xlat_promote_to_old_space() above, but promotes the block to temp space.
1.194 + * TODO: Try to combine these - they're nearly identical
1.195 + */
1.196 +void xlat_promote_to_temp_space( xlat_cache_block_t block )
1.197 +{
1.198 + int size = block->size;
1.199 + int allocation = -sizeof(struct xlat_cache_block);
1.200 + xlat_cache_block_t curr = xlat_temp_cache_ptr;
1.201 + xlat_cache_block_t start_block = curr;
1.202 + do {
1.203 + if( curr->active == BLOCK_USED ) {
1.204 + xlat_promote_to_old_space( curr );
1.205 + }
1.206 + allocation += curr->size + sizeof(struct xlat_cache_block);
1.207 + curr = NEXT(curr);
1.208 + if( allocation > size ) {
1.209 + break; /* done */
1.210 + }
1.211 + if( curr->size == 0 ) { /* End-of-cache Sentinel */
1.212 + /* Leave what we just released as free space and start again from the
1.213 + * top of the cache
1.214 + */
1.215 + start_block->active = 0;
1.216 + start_block->size = allocation;
1.217 + allocation = -sizeof(struct xlat_cache_block);
1.218 + start_block = curr = xlat_temp_cache;
1.219 + }
1.220 + } while(1);
1.221 + start_block->active = 1;
1.222 + start_block->size = allocation;
1.223 + start_block->lut_entry = block->lut_entry;
1.224 + *block->lut_entry = &start_block->code;
1.225 + memcpy( start_block->code, block->code, block->size );
1.226 + xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
1.227 + if( xlat_temp_cache_ptr->size == 0 ) {
1.228 + xlat_temp_cache_ptr = xlat_temp_cache;
1.229 + }
1.230 +
1.231 +}
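The TODO above asks for the two promotion routines to be combined; one possible shape, sketched here under a hypothetical name and not part of this changeset, is to pass the target generation in explicitly:

    /* Hypothetical combined promotion (untested sketch). 'cache' is the base of
     * the target generation, 'cache_ptr' its current allocation pointer, and
     * 'evict_used' selects the extra step the temp-space variant performs today
     * (pushing BLOCK_USED victims on to old space). Returns the new allocation
     * pointer for the caller to store back. */
    static xlat_cache_block_t xlat_promote_to( xlat_cache_block_t cache,
                                               xlat_cache_block_t cache_ptr,
                                               int evict_used,
                                               xlat_cache_block_t block )
    {
        int size = block->size;
        int allocation = -sizeof(struct xlat_cache_block);
        xlat_cache_block_t curr = cache_ptr;
        xlat_cache_block_t start_block = curr;
        do {
            if( evict_used && curr->active == BLOCK_USED ) {
                xlat_promote_to_old_space( curr );
            }
            allocation += curr->size + sizeof(struct xlat_cache_block);
            curr = NEXT(curr);
            if( allocation > size ) {
                break;
            }
            if( curr->size == 0 ) { /* end-of-cache sentinel: wrap around */
                start_block->active = 0;
                start_block->size = allocation;
                allocation = -sizeof(struct xlat_cache_block);
                start_block = curr = cache;
            }
        } while(1);
        start_block->active = 1;
        start_block->size = allocation;
        start_block->lut_entry = block->lut_entry;
        *block->lut_entry = &start_block->code;
        memcpy( start_block->code, block->code, block->size );
        cache_ptr = xlat_cut_block( start_block, size );
        if( cache_ptr->size == 0 ) {
            cache_ptr = cache;
        }
        return cache_ptr;
    }

With that in place, xlat_promote_to_old_space(b) would reduce to xlat_old_cache_ptr = xlat_promote_to( xlat_old_cache, xlat_old_cache_ptr, 0, b ), and the temp-space variant to the same call with xlat_temp_cache, xlat_temp_cache_ptr and evict_used set.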
1.232 +
1.233 +/**
1.234 + * Returns the next block in the new cache list that can be written to by the
1.235 + * translator. If the next block is active, it is evicted first.
1.236 + */
1.237 +xlat_cache_block_t xlat_start_block( sh4addr_t address )
1.238 +{
1.239 + if( xlat_new_cache_ptr->size == 0 ) {
1.240 + xlat_new_cache_ptr = xlat_new_cache;
1.241 + }
1.242 +
1.243 + if( xlat_new_cache_ptr->active ) {
1.244 + xlat_promote_to_temp_space( xlat_new_cache_ptr );
1.245 + }
1.246 + xlat_new_create_ptr = xlat_new_cache_ptr;
1.247 + xlat_new_create_ptr->active = 1;
1.248 + xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
1.249 +
1.250 + /* Add the LUT entry for the block */
1.251 + if( xlat_lut[XLAT_LUT_PAGE(address)] == NULL ) {
1.252 + xlat_lut[XLAT_LUT_PAGE(address)] =
1.253 + mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
1.254 + MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
1.255 + memset( xlat_lut[XLAT_LUT_PAGE(address)], 0, XLAT_LUT_PAGE_SIZE );
1.256 + }
1.257 +
1.258 + if( IS_ENTRY_POINT(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]) ) {
1.259 + xlat_cache_block_t oldblock = BLOCK_FOR_CODE(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]);
1.260 + oldblock->active = 0;
1.261 + }
1.262 +
1.263 + xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)] =
1.264 + &xlat_new_create_ptr->code;
1.265 + xlat_new_create_ptr->lut_entry = xlat_lut[XLAT_LUT_PAGE(address)] + XLAT_LUT_ENTRY(address);
1.266 +
1.267 + return xlat_new_create_ptr;
1.268 +}
1.269 +
1.270 +xlat_cache_block_t xlat_extend_block()
1.271 +{
1.272 + if( xlat_new_cache_ptr->size == 0 ) {
1.273 + /* Migrate to the front of the cache to keep it contiguous */
1.274 + xlat_new_create_ptr->active = 0;
1.275 + char *olddata = xlat_new_create_ptr->code;
1.276 + int oldsize = xlat_new_create_ptr->size;
1.277 + int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
1.278 + void **lut_entry = xlat_new_create_ptr->lut_entry;
1.279 + int allocation = -sizeof(struct xlat_cache_block);
1.280 + xlat_new_cache_ptr = xlat_new_cache;
1.281 + do {
1.282 + if( xlat_new_cache_ptr->active ) {
1.283 + xlat_promote_to_temp_space( xlat_new_cache_ptr );
1.284 + }
1.285 + allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
1.286 + xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
1.287 + } while( allocation < size );
1.288 + xlat_new_create_ptr = xlat_new_cache;
1.289 + xlat_new_create_ptr->active = 1;
1.290 + xlat_new_create_ptr->size = allocation;
1.291 + xlat_new_create_ptr->lut_entry = lut_entry;
1.292 + *lut_entry = &xlat_new_create_ptr->code;
1.293 + memmove( xlat_new_create_ptr->code, olddata, oldsize );
1.294 + } else {
1.295 + if( xlat_new_cache_ptr->active ) {
1.296 + xlat_promote_to_temp_space( xlat_new_cache_ptr );
1.297 + }
1.298 + xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
1.299 + xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
1.300 + }
1.301 + return xlat_new_create_ptr;
1.302 +
1.303 +}
1.304 +
1.305 +void xlat_commit_block( uint32_t destsize, uint32_t srcsize )
1.306 +{
1.307 + void **ptr = xlat_new_create_ptr->lut_entry;
1.308 + void **endptr = ptr + (srcsize>>2);
1.309 + while( ptr < endptr ) {
1.310 + if( *ptr == NULL ) {
1.311 + *ptr = XLAT_LUT_ENTRY_USED;
1.312 + }
1.313 + ptr++;
1.314 + }
1.315 +
1.316 + xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
1.317 +}
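Putting the allocation calls together, the intended calling sequence for a translator front end looks roughly like the sketch below; emit_instruction(), sh4_end_of_block() and the 64-byte worst-case output size are assumptions for illustration, not definitions from this file:

    uint32_t emit_instruction( char *out, sh4addr_t pc );   /* hypothetical emitter */
    int sh4_end_of_block( sh4addr_t pc );                    /* hypothetical test */

    void translate_block_sketch( sh4addr_t start_pc )
    {
        xlat_cache_block_t block = xlat_start_block( start_pc );
        uint32_t dest = 0, src = 0;
        do {
            if( dest + 64 > block->size ) {      /* assumed worst-case output size */
                block = xlat_extend_block();     /* note: this may move the code */
            }
            dest += emit_instruction( &block->code[dest], start_pc + src );
            src += 2;                            /* SH4 instructions are 2 bytes */
        } while( !sh4_end_of_block( start_pc + src ) );
        xlat_commit_block( dest, src );          /* generated size, source size */
    }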
1.318 +
1.319 +void xlat_delete_block( xlat_cache_block_t block )
1.320 +{
1.321 + block->active = 0;
1.322 + *block->lut_entry = NULL;
1.323 +}
1.324 +
1.325 +void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
1.326 +{
1.327 + int foundptr = 0;
1.328 + xlat_cache_block_t tail =
1.329 + (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));
1.330 +
1.331 + assert( tail->active == 1 );
1.332 + assert( tail->size == 0 );
1.333 + while( cache < tail ) {
1.334 + assert( cache->active >= 0 && cache->active <= 2 );
1.335 + assert( cache->size >= 0 && cache->size < size );
1.336 + if( cache == ptr ) {
1.337 + foundptr = 1;
1.338 + }
1.339 + cache = NEXT(cache);
1.340 + }
1.341 + assert( cache == tail );
1.342 + assert( foundptr == 1 );
1.343 +}
1.344 +
1.345 +void xlat_check_integrity( )
1.346 +{
1.347 + xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
1.348 + xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
1.349 + xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
1.350 +}