/**
 * Translation cache management. This part is architecture independent.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>
#include <assert.h>

#include "dreamcast.h"
#include "sh4/sh4core.h"
#include "sh4/xltcache.h"
#include "x86dasm/x86dasm.h"
#define XLAT_LUT_PAGE_BITS 12
#define XLAT_LUT_TOTAL_BITS 28
#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)
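/* The lookup table is two-level: the SH4 address is split into a 16-bit page
 * index (bits 13-28) and a 12-bit entry index (bits 1-12), giving one entry
 * per 16-bit instruction word within an 8KB page. Pages are allocated lazily
 * (see xlat_get_lut_entry and xlat_start_block), so untranslated regions cost
 * only a NULL pointer in xlat_lut. */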
#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))

#define XLAT_LUT_ENTRY_EMPTY (void *)0
#define XLAT_LUT_ENTRY_USED  (void *)1

#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
#define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
#define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)

#define MIN_BLOCK_SIZE 32
#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)
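/* Cache blocks are laid out head-to-tail: each struct xlat_cache_block header
 * is immediately followed by its code[] payload of `size` bytes, and NEXT()
 * finds the following header by stepping over that payload. MIN_BLOCK_SIZE
 * keeps splits from producing free blocks too small to ever be reused. */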
#define BLOCK_INACTIVE 0
#define BLOCK_ACTIVE 1
#define BLOCK_USED 2
xlat_cache_block_t xlat_new_cache;
xlat_cache_block_t xlat_new_cache_ptr;
xlat_cache_block_t xlat_new_create_ptr;
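/* xlat_new_cache is the region newly-translated blocks are written into;
 * xlat_new_cache_ptr is the next allocation point and xlat_new_create_ptr is
 * the block currently being written by the translator. With
 * XLAT_GENERATIONAL_CACHE defined, blocks that survive eviction are copied
 * out to the temp and old generations instead of being dropped outright. */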
#ifdef XLAT_GENERATIONAL_CACHE
xlat_cache_block_t xlat_temp_cache;
xlat_cache_block_t xlat_temp_cache_ptr;
xlat_cache_block_t xlat_old_cache;
xlat_cache_block_t xlat_old_cache_ptr;
#endif

static void **xlat_lut[XLAT_LUT_PAGES];
static gboolean xlat_initialized = FALSE;
void xlat_cache_init(void)
{
    if( !xlat_initialized ) {
        xlat_initialized = TRUE;
        xlat_new_cache = mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_new_cache_ptr = xlat_new_cache;
        xlat_new_create_ptr = xlat_new_cache;
#ifdef XLAT_GENERATIONAL_CACHE
        xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_temp_cache_ptr = xlat_temp_cache;
        xlat_old_cache_ptr = xlat_old_cache;
#endif
//        xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
//                MAP_PRIVATE|MAP_ANON, -1, 0);
        memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
    }
    xlat_flush_cache();
}
/**
 * Reset the cache structure to its default state
 */
void xlat_flush_cache()
{
    xlat_cache_block_t tmp;
    int i;

    xlat_new_cache_ptr = xlat_new_cache;
    xlat_new_cache_ptr->active = 0;
    xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_new_cache_ptr);
    tmp->active = 1; /* End-of-cache sentinel: active, zero size */
    tmp->size = 0;
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_temp_cache_ptr->active = 0;
    xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_temp_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_old_cache_ptr->active = 0;
    xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_old_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#endif
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        if( xlat_lut[i] != NULL ) {
            memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
        }
    }
}
static void xlat_flush_page_by_lut( void **page )
{
    int i;
    for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
        if( IS_ENTRY_POINT(page[i]) ) {
            XLAT_BLOCK_FOR_CODE(page[i])->active = 0;
        }
        page[i] = NULL;
    }
}
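/* Invalidation below is deliberately coarse: the cheapest correct response to
 * a write anywhere in an 8KB page is to flush every entry in that page, on
 * the theory that finer-grained bookkeeping would cost more than the
 * occasional retranslation. */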
void FASTCALL xlat_invalidate_word( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( page[entry] != NULL ) {
            xlat_flush_page_by_lut(page);
        }
    }
}
void FASTCALL xlat_invalidate_long( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        /* Check both word entries covered by the 32-bit write in one compare
         * (assumes the two adjacent entries fit in 64 bits, i.e. 32-bit pointers) */
        if( *(uint64_t *)&page[entry] != 0 ) {
            xlat_flush_page_by_lut(page);
        }
    }
}
void FASTCALL xlat_invalidate_block( sh4addr_t address, size_t size )
{
    int i;
    int entry_count = size >> 1; // words
    uint32_t page_no = XLAT_LUT_PAGE(address);
    int entry = XLAT_LUT_ENTRY(address);
    do {
        void **page = xlat_lut[page_no];
        int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
        if( entry_count < page_entries ) {
            page_entries = entry_count;
        }
        if( page != NULL ) {
            if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
                /* Overwriting the entire page anyway */
                xlat_flush_page_by_lut(page);
            } else {
                for( i=entry; i<entry+page_entries; i++ ) {
                    if( page[i] != NULL ) {
                        xlat_flush_page_by_lut(page);
                        break;
                    }
                }
            }
        }
        /* Advance to the next page, consuming the entries just covered */
        page_no ++;
        entry_count -= page_entries;
        entry = 0;
    } while( entry_count > 0 );
}
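/* Illustration: invalidating 0x2000 bytes at 0x8C001000 covers 0x1000 word
 * entries - the upper 2048 entries of one LUT page, then (with entry reset
 * to 0) the first 2048 of the next - flushing each page that holds any live
 * entry in the range. */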
void FASTCALL xlat_flush_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        xlat_flush_page_by_lut(page);
    }
}
void * FASTCALL xlat_get_code( sh4addr_t address )
{
    void *result = NULL;
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        /* Mask out the low tag bits (XLAT_LUT_ENTRY_USED) to recover the code pointer */
        result = (void *)(((uintptr_t)(page[XLAT_LUT_ENTRY(address)])) & (~((uintptr_t)0x03)));
    }
    return result;
}
xlat_recovery_record_t xlat_get_pre_recovery( void *code, void *native_pc )
{
    if( code != NULL ) {
        uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
        xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code);
        uint32_t count = block->recover_table_size;
        xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
        uint32_t posn;
        for( posn = 1; posn < count; posn++ ) {
            if( records[posn].xlat_offset >= pc_offset ) {
                return &records[posn-1];
            }
        }
        return &records[count-1];
    }
    return NULL;
}
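/* Recovery records are stored sorted by xlat_offset, so the scan above yields
 * the last record whose native-code offset is at or before native_pc; the
 * table is assumed non-empty whenever a block publishes a recover_table_size. */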
void ** FASTCALL xlat_get_lut_entry( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];

    /* Allocate the LUT page if it doesn't exist yet */
    if( page == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] = page =
            mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( page, 0, XLAT_LUT_PAGE_SIZE );
    }

    return &page[XLAT_LUT_ENTRY(address)];
}
uint32_t FASTCALL xlat_get_block_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    return xlt->size;
}
uint32_t FASTCALL xlat_get_code_size( void *block )
{
    /* The code proper runs up to the recovery table, if the block has one */
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    if( xlt->recover_table_offset == 0 ) {
        return xlt->size;
    } else {
        return xlt->recover_table_offset;
    }
}
/**
 * Cut the specified block so that it has the given size, with the remaining data
 * forming a new free block. If the free block would be less than the minimum size,
 * the cut is not performed.
 * @return the next block after the (possibly cut) block.
 */
static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
{
    cutsize = (cutsize + 3) & 0xFFFFFFFC; // force 32-bit alignment
    assert( cutsize <= block->size );
    if( block->size > cutsize + MIN_TOTAL_SIZE ) {
        int oldsize = block->size;
        block->size = cutsize;
        xlat_cache_block_t next = NEXT(block);
        next->active = 0;
        next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
    }
    return NEXT(block);
}
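/* Illustration: cutting a 1024-byte block down to 100 bytes rounds the cut up
 * to 104, then carves the leftover 1024 - 104 - sizeof(header) bytes into a
 * fresh inactive block; a leftover smaller than MIN_TOTAL_SIZE is simply left
 * attached to the original block. */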
#ifdef XLAT_GENERATIONAL_CACHE
/**
 * Promote a block in temp space (or elsewhere for that matter) to old space.
 *
 * @param block the block to promote.
 */
static void xlat_promote_to_old_space( xlat_cache_block_t block )
{
    int allocation = (int)-sizeof(struct xlat_cache_block);
    int size = block->size;
    xlat_cache_block_t curr = xlat_old_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_old_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_old_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_old_cache_ptr->size == 0 ) {
        xlat_old_cache_ptr = xlat_old_cache;
    }
}
/**
 * Similarly to the above method, promotes a block to temp space.
 * TODO: Try to combine these - they're nearly identical
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    int size = block->size;
    int allocation = (int)-sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = xlat_temp_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        if( curr->active == BLOCK_USED ) {
            xlat_promote_to_old_space( curr );
        } else if( curr->active == BLOCK_ACTIVE ) {
            // Active but not used, release the block
            *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03);
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_temp_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_temp_cache_ptr->size == 0 ) {
        xlat_temp_cache_ptr = xlat_temp_cache;
    }
}

#else
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    /* Without the generational cache, evicted blocks are simply dropped */
    *block->lut_entry = 0;
}
#endif
/**
 * Returns the next block in the new cache list that can be written to by the
 * translator. If the next block is active, it is evicted first.
 */
xlat_cache_block_t xlat_start_block( sh4addr_t address )
{
    if( xlat_new_cache_ptr->size == 0 ) {
        xlat_new_cache_ptr = xlat_new_cache;
    }

    if( xlat_new_cache_ptr->active ) {
        xlat_promote_to_temp_space( xlat_new_cache_ptr );
    }
    xlat_new_create_ptr = xlat_new_cache_ptr;
    xlat_new_create_ptr->active = 1;
    xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);

    /* Add the LUT entry for the block */
    if( xlat_lut[XLAT_LUT_PAGE(address)] == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] =
            mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( xlat_lut[XLAT_LUT_PAGE(address)], 0, XLAT_LUT_PAGE_SIZE );
    }

    if( IS_ENTRY_POINT(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]) ) {
        xlat_cache_block_t oldblock = XLAT_BLOCK_FOR_CODE(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]);
        oldblock->active = 0;
    }

    xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)] =
        &xlat_new_create_ptr->code;
    xlat_new_create_ptr->lut_entry = xlat_lut[XLAT_LUT_PAGE(address)] + XLAT_LUT_ENTRY(address);

    return xlat_new_create_ptr;
}
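/* A sketch of the intended calling sequence (the actual caller is the SH4
 * translation core; the size variable names here are illustrative only):
 *
 *   xlat_cache_block_t block = xlat_start_block( pc );
 *   // ... emit native code into block->code, calling
 *   //     block = xlat_extend_block( needed_size );
 *   //     whenever the current block is too small ...
 *   xlat_commit_block( emitted_bytes, sh4_bytes_consumed );
 */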
xlat_cache_block_t xlat_extend_block( uint32_t newSize )
{
    while( xlat_new_create_ptr->size < newSize ) {
        if( xlat_new_cache_ptr->size == 0 ) {
            /* Migrate to the front of the cache to keep it contiguous */
            xlat_new_create_ptr->active = 0;
            sh4ptr_t olddata = xlat_new_create_ptr->code;
            int oldsize = xlat_new_create_ptr->size;
            int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
            void **lut_entry = xlat_new_create_ptr->lut_entry;
            int allocation = (int)-sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = xlat_new_cache;
            do {
                if( xlat_new_cache_ptr->active ) {
                    xlat_promote_to_temp_space( xlat_new_cache_ptr );
                }
                allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
                xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
            } while( allocation < size );
            xlat_new_create_ptr = xlat_new_cache;
            xlat_new_create_ptr->active = 1;
            xlat_new_create_ptr->size = allocation;
            xlat_new_create_ptr->lut_entry = lut_entry;
            *lut_entry = &xlat_new_create_ptr->code;
            memmove( xlat_new_create_ptr->code, olddata, oldsize );
        } else {
            /* Consume the next block in sequence to grow in place */
            if( xlat_new_cache_ptr->active ) {
                xlat_promote_to_temp_space( xlat_new_cache_ptr );
            }
            xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
        }
    }
    return xlat_new_create_ptr;
}
void xlat_commit_block( uint32_t destsize, uint32_t srcsize )
{
    void **ptr = xlat_new_create_ptr->lut_entry;
    void **endptr = ptr + (srcsize>>2);
    while( ptr < endptr ) {
        /* Mark the entries after the entry point as in-use */
        ptr++;
        *ptr = XLAT_LUT_ENTRY_USED;
    }

    xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
}
void xlat_delete_block( xlat_cache_block_t block )
{
    block->active = 0;
    *block->lut_entry = NULL;
}
void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
{
    int foundptr = 0;
    xlat_cache_block_t tail =
        (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));

    assert( tail->active == 1 );
    assert( tail->size == 0 );
    while( cache < tail ) {
        assert( cache->active >= 0 && cache->active <= 2 );
        assert( cache->size >= 0 && cache->size < size );
        if( cache == ptr ) {
            foundptr = 1;
        }
        cache = NEXT(cache);
    }
    assert( cache == tail );
    assert( foundptr == 1 || tail == ptr );
}
void xlat_check_integrity( )
{
    xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
#endif
}