filename | src/sh4/xltcache.c |
changeset | 410:5f8413358e7f |
prev | 407:d24ab36150c4 |
next | 422:61a0598e07ff |
author | nkeynes |
date | Sat Sep 29 11:06:40 2007 +0000 |
permissions | -rw-r--r-- |
last change | Change extend-block to take a requested size; terminate blocks on page boundaries for easier invalidation |
/**
 * $Id: xltcache.c,v 1.7 2007-09-29 11:06:40 nkeynes Exp $
 *
 * Translation cache management. This part is architecture independent.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include "sh4/xltcache.h"
#include "dreamcast.h"
#include <sys/mman.h>
#include <assert.h>
#include <stdio.h>   /* fprintf */
#include <string.h>  /* memset, memcpy, memmove */
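
/*
 * The block lookup table is a two-level structure: a first-level array of
 * per-page pointers, where each second-level page covers 8KB of SH4 address
 * space with one entry per 16-bit instruction word (4096 entries per page).
 * An entry is either NULL (no translated code), XLAT_LUT_ENTRY_USED (the
 * word is covered by a block but is not an entry point), or a pointer to
 * the translated code for a block starting at that word. Second-level
 * pages are allocated lazily, the first time a block is created there.
 */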
#define XLAT_LUT_PAGE_BITS 12
#define XLAT_LUT_TOTAL_BITS 28
#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)

#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))
#define XLAT_LUT_ENTRY_EMPTY (void *)0
#define XLAT_LUT_ENTRY_USED  (void *)1

#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
#define BLOCK_FOR_CODE(code) (((xlat_cache_block_t)code)-1)
#define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
#define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)
#define MIN_BLOCK_SIZE 32
#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)

#define BLOCK_INACTIVE 0
#define BLOCK_ACTIVE 1
#define BLOCK_USED 2
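
/*
 * Translated code lives in one of three caches. The translator always
 * writes new blocks into the new-code cache; when an active block there is
 * about to be overwritten it is evicted into the temp cache, and blocks
 * still marked BLOCK_USED when they are evicted from the temp cache are in
 * turn promoted into the old cache. Each cache is a contiguous chain of
 * blocks, each preceded by a struct xlat_cache_block header and terminated
 * by a zero-size sentinel block.
 */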
xlat_cache_block_t xlat_new_cache;
xlat_cache_block_t xlat_new_cache_ptr;
xlat_cache_block_t xlat_new_create_ptr;
xlat_cache_block_t xlat_temp_cache;
xlat_cache_block_t xlat_temp_cache_ptr;
xlat_cache_block_t xlat_old_cache;
xlat_cache_block_t xlat_old_cache_ptr;
static void ***xlat_lut;
static gboolean xlat_initialized = FALSE;
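
/**
 * Allocate the three code caches and the first-level lookup table on the
 * first call, then reset everything to the empty state.
 */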
void xlat_cache_init()
{
    if( !xlat_initialized ) {
        xlat_initialized = TRUE;
        xlat_new_cache = mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                               MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
        xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                                MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
        xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                               MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
        xlat_new_cache_ptr = xlat_new_cache;
        xlat_temp_cache_ptr = xlat_temp_cache;
        xlat_old_cache_ptr = xlat_old_cache;
        xlat_new_create_ptr = xlat_new_cache;

        xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
                         MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
    }
    xlat_flush_cache();
}
void xlat_print_free( FILE *out )
{
    fprintf( out, "New space: %d\nTemp space: %d\nOld space: %d\n",
             xlat_new_cache_ptr->size, xlat_temp_cache_ptr->size, xlat_old_cache_ptr->size );
}
/**
 * Reset the cache structure to its default state
 */
void xlat_flush_cache()
{
    xlat_cache_block_t tmp;
    int i;
    xlat_new_cache_ptr = xlat_new_cache;
    xlat_new_cache_ptr->active = 0;
    xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_new_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_temp_cache_ptr->active = 0;
    xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_temp_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_old_cache_ptr->active = 0;
    xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_old_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        if( xlat_lut[i] != NULL ) {
            memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
        }
    }
}
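
/**
 * Mark every block with an entry point in the given second-level page as
 * inactive, and clear all of the page's entries.
 */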
static void xlat_flush_page_by_lut( void **page )
{
    int i;
    for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
        if( IS_ENTRY_POINT(page[i]) ) {
            BLOCK_FOR_CODE(page[i])->active = 0;
        }
        page[i] = NULL;
    }
}
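
/**
 * Invalidate any translated code covering the 16-bit word at addr. If the
 * word lies within a translated block, every block with an entry point in
 * the surrounding 8KB page is flushed.
 */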
void xlat_invalidate_word( sh4addr_t addr )
{
    if( xlat_lut ) {
        void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
        if( page != NULL ) {
            int entry = XLAT_LUT_ENTRY(addr);
            if( page[entry] != NULL ) {
                xlat_flush_page_by_lut(page);
            }
        }
    }
}
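
/**
 * Invalidate any translated code covering the 32-bit longword at addr,
 * i.e. either of the two 16-bit words it spans.
 */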
void xlat_invalidate_long( sh4addr_t addr )
{
    if( xlat_lut ) {
        void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
        if( page != NULL ) {
            int entry = XLAT_LUT_ENTRY(addr);
            if( page[entry] != NULL || page[entry+1] != NULL ) {
                xlat_flush_page_by_lut(page);
            }
        }
    }
}
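
/**
 * Invalidate all translated code covering any part of the given byte
 * range. Each 8KB page touched by the range is flushed if it contains at
 * least one live LUT entry.
 */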
void xlat_invalidate_block( sh4addr_t address, size_t size )
{
    int i;
    int entry_count = size >> 1; // words
    uint32_t page_no = XLAT_LUT_PAGE(address);
    int entry = XLAT_LUT_ENTRY(address);
    if( xlat_lut ) {
        do {
            void **page = xlat_lut[page_no];
            int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
            if( entry_count < page_entries ) {
                page_entries = entry_count;
            }
            if( page != NULL ) {
                if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
                    /* Overwriting the entire page anyway */
                    xlat_flush_page_by_lut(page);
                } else {
                    for( i=entry; i<entry+page_entries; i++ ) {
                        if( page[i] != NULL ) {
                            xlat_flush_page_by_lut(page);
                            break;
                        }
                    }
                }
            }
            page_no ++;
            entry_count -= page_entries;
            entry = 0;
        } while( entry_count > 0 );
    }
}
void xlat_flush_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        xlat_flush_page_by_lut(page);
    }
}
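
/**
 * Return the translated entry point for the given SH4 address, or NULL if
 * there is none. Masking off the low two bits turns the
 * XLAT_LUT_ENTRY_USED marker (1) back into NULL, so only real entry
 * points are returned.
 */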
void *xlat_get_code( sh4addr_t address )
{
    void *result = NULL;
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        result = (void *)(((uint32_t)(page[XLAT_LUT_ENTRY(address)])) & 0xFFFFFFFC);
    }
    return result;
}
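
/**
 * Return a pointer to the LUT entry for the given address, allocating the
 * second-level page on demand if it does not exist yet.
 */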
void **xlat_get_lut_entry( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];

    /* Add the LUT entry for the block */
    if( page == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] = page =
            mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                  MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
        memset( page, 0, XLAT_LUT_PAGE_SIZE );
    }

    return &page[XLAT_LUT_ENTRY(address)];
}
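
/**
 * Return the allocated code size of a block, given a pointer to its code
 * area (the block header immediately precedes the code).
 */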
uint32_t xlat_get_block_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    return xlt->size;
}
/**
 * Cut the specified block so that it has the given size, with the remaining data
 * forming a new free block. If the free block would be less than the minimum size,
 * the cut is not performed.
 * @return the next block after the (possibly cut) block.
 */
static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
{
    cutsize = (cutsize + 3) & 0xFFFFFFFC; // force 4-byte alignment
    assert( cutsize <= block->size );
    if( block->size > cutsize + MIN_TOTAL_SIZE ) {
        int oldsize = block->size;
        block->size = cutsize;
        xlat_cache_block_t next = NEXT(block);
        next->active = 0;
        next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
        return next;
    } else {
        return NEXT(block);
    }
}
/**
 * Promote a block in temp space (or elsewhere for that matter) to old space.
 *
 * @param block the block to promote.
 */
static void xlat_promote_to_old_space( xlat_cache_block_t block )
{
    int allocation = -sizeof(struct xlat_cache_block);
    int size = block->size;
    xlat_cache_block_t curr = xlat_old_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = -sizeof(struct xlat_cache_block);
            start_block = curr = xlat_old_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_old_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_old_cache_ptr->size == 0 ) {
        xlat_old_cache_ptr = xlat_old_cache;
    }
}
/**
 * Similar to the above method, but promotes a block into temp space instead.
 * TODO: Try to combine these - they're nearly identical
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    int size = block->size;
    int allocation = -sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = xlat_temp_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        if( curr->active == BLOCK_USED ) {
            xlat_promote_to_old_space( curr );
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = -sizeof(struct xlat_cache_block);
            start_block = curr = xlat_temp_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_temp_cache_ptr->size == 0 ) {
        xlat_temp_cache_ptr = xlat_temp_cache;
    }
}
/**
 * Returns the next block in the new cache list that can be written to by the
 * translator. If the next block is active, it is evicted first.
 */
xlat_cache_block_t xlat_start_block( sh4addr_t address )
{
    if( xlat_new_cache_ptr->size == 0 ) {
        xlat_new_cache_ptr = xlat_new_cache;
    }

    if( xlat_new_cache_ptr->active ) {
        xlat_promote_to_temp_space( xlat_new_cache_ptr );
    }
    xlat_new_create_ptr = xlat_new_cache_ptr;
    xlat_new_create_ptr->active = 1;
    xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);

    /* Add the LUT entry for the block */
    if( xlat_lut[XLAT_LUT_PAGE(address)] == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] =
            mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                  MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
        memset( xlat_lut[XLAT_LUT_PAGE(address)], 0, XLAT_LUT_PAGE_SIZE );
    }

    if( IS_ENTRY_POINT(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]) ) {
        xlat_cache_block_t oldblock = BLOCK_FOR_CODE(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]);
        oldblock->active = 0;
    }

    xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)] =
        &xlat_new_create_ptr->code;
    xlat_new_create_ptr->lut_entry = xlat_lut[XLAT_LUT_PAGE(address)] + XLAT_LUT_ENTRY(address);

    return xlat_new_create_ptr;
}
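
/**
 * Grow the current in-progress block until its code area is at least
 * newSize bytes, by absorbing the free or evicted blocks that follow it.
 * If the end of the cache is reached, the partially written code is moved
 * back to the start of the new cache to keep it contiguous.
 */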
xlat_cache_block_t xlat_extend_block( uint32_t newSize )
{
    while( xlat_new_create_ptr->size < newSize ) {
        if( xlat_new_cache_ptr->size == 0 ) {
            /* Migrate to the front of the cache to keep it contiguous */
            xlat_new_create_ptr->active = 0;
            char *olddata = xlat_new_create_ptr->code;
            int oldsize = xlat_new_create_ptr->size;
            int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
            void **lut_entry = xlat_new_create_ptr->lut_entry;
            int allocation = -sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = xlat_new_cache;
            do {
                if( xlat_new_cache_ptr->active ) {
                    xlat_promote_to_temp_space( xlat_new_cache_ptr );
                }
                allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
                xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
            } while( allocation < size );
            xlat_new_create_ptr = xlat_new_cache;
            xlat_new_create_ptr->active = 1;
            xlat_new_create_ptr->size = allocation;
            xlat_new_create_ptr->lut_entry = lut_entry;
            *lut_entry = &xlat_new_create_ptr->code;
            memmove( xlat_new_create_ptr->code, olddata, oldsize );
        } else {
            if( xlat_new_cache_ptr->active ) {
                xlat_promote_to_temp_space( xlat_new_cache_ptr );
            }
            xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
        }
    }
    return xlat_new_create_ptr;
}
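
/**
 * Commit the current in-progress block: flag the LUT entries covered by
 * the translated source range as in-use (XLAT_LUT_ENTRY_USED) so that
 * writes within that range invalidate the block, then trim the generated
 * code down to destsize.
 */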
void xlat_commit_block( uint32_t destsize, uint32_t srcsize )
{
    void **ptr = xlat_new_create_ptr->lut_entry;
    void **endptr = ptr + (srcsize>>2);
    while( ptr < endptr ) {
        if( *ptr == NULL ) {
            *ptr = XLAT_LUT_ENTRY_USED;
        }
        ptr++;
    }

    xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
}
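
/*
 * A minimal usage sketch of the block API above, as a translator front-end
 * might drive it. sh4_emit_instruction() and MAX_EMITTED_BYTES are
 * hypothetical placeholders used for illustration only; the xlat_* calls
 * are the real interface exported by this module.
 *
 *   void *translate_basic_block( sh4addr_t start )
 *   {
 *       sh4addr_t pc = start;
 *       uint32_t used = 0;
 *       gboolean done = FALSE;
 *       xlat_cache_block_t block = xlat_start_block( start );
 *       while( !done ) {
 *           if( used + MAX_EMITTED_BYTES > block->size ) {
 *               // Grow the block before emitting into it
 *               block = xlat_extend_block( used + MAX_EMITTED_BYTES );
 *           }
 *           used += sh4_emit_instruction( block->code + used, pc, &done );
 *           pc += 2;  // SH4 instructions are 2 bytes wide
 *       }
 *       xlat_commit_block( used, pc - start );
 *       return block->code;
 *   }
 */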
void xlat_delete_block( xlat_cache_block_t block )
{
    block->active = 0;
    *block->lut_entry = NULL;
}
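
/**
 * Sanity-check one cache: walk the block chain from the start, asserting
 * that every header is well-formed, that the chain ends exactly at the
 * zero-size sentinel, and that the cache's free pointer refers to a block
 * within the chain.
 */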
void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
{
    int foundptr = 0;
    xlat_cache_block_t tail =
        (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));

    assert( tail->active == 1 );
    assert( tail->size == 0 );
    while( cache < tail ) {
        assert( cache->active >= 0 && cache->active <= 2 );
        assert( cache->size >= 0 && cache->size < size );
        if( cache == ptr ) {
            foundptr = 1;
        }
        cache = NEXT(cache);
    }
    assert( cache == tail );
    assert( foundptr == 1 );
}
void xlat_check_integrity( )
{
    xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
}
void xlat_disasm_block( FILE *out, void *block )
{
    uint32_t buflen = xlat_get_block_size(block);
    x86_set_symtab( NULL, 0 );
    x86_disasm_block( out, block, buflen );
}