filename    src/xlat/xltcache.c
changeset   1195:072131b61d2a
prev        1189:1540105786c8
next        1214:49152b3d8b75
author      nkeynes
date        Mon Dec 12 21:15:44 2011 +1000
permissions -rw-r--r--
last change Handle branch delay-slot instruction that falls on the next page correctly. - Generate the right end PC in the first place (sh4trans.c) - Allow blocks to be marked as both entry point + continuation, and specifically handle invalidation when first entry of a page is a continuation == flush previous page as well.
/**
 * $Id$
 *
 * Translation cache management. This part is architecture independent.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <assert.h>
#include <string.h>   /* memset, memcpy, memmove */
#include <stdlib.h>   /* qsort */

#include "dreamcast.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "xlat/xltcache.h"
#include "x86dasm/x86dasm.h"
#define XLAT_LUT_PAGE_BITS 12
#define XLAT_LUT_TOTAL_BITS 28
#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)

#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))

#define XLAT_LUT_ENTRY_EMPTY (void *)0
#define XLAT_LUT_ENTRY_USED  (void *)1

#define XLAT_ADDR_FROM_ENTRY(pagenum,entrynum) ((((pagenum)&0xFFFF)<<13)|(((entrynum)<<1)&0x1FFE))

#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
#define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
#define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)
#define IS_ENTRY_CONTINUATION(ent) (((uintptr_t)ent) & ((uintptr_t)XLAT_LUT_ENTRY_USED))
#define IS_FIRST_ENTRY_IN_PAGE(addr) (((addr)&0x1FFE) == 0)
#define XLAT_CODE_ADDR(ent) ((void *)(((uintptr_t)ent) & (~((uintptr_t)0x03))))
#define XLAT_BLOCK_FOR_LUT_ENTRY(ent) XLAT_BLOCK_FOR_CODE(XLAT_CODE_ADDR(ent))
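
/* Each LUT entry packs a native code pointer (at least 4-byte aligned) with
 * flag bits in its low 2 bits: bit 0 set marks the entry as used (i.e. a
 * continuation of a block), while any value above XLAT_LUT_ENTRY_USED is a
 * block entry point. As a worked example, for SH4 address 0x0C001234:
 *   XLAT_LUT_PAGE(0x0C001234)  == 0x6000   (address bits 13..28)
 *   XLAT_LUT_ENTRY(0x0C001234) == 0x091A   (address bits 1..12)
 * and XLAT_ADDR_FROM_ENTRY(0x6000, 0x091A) reconstructs 0x0C001234.
 */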
#define MIN_BLOCK_SIZE 32
#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)

#define BLOCK_INACTIVE 0
#define BLOCK_ACTIVE 1
#define BLOCK_USED 2
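
/* Values for the block 'active' field: BLOCK_INACTIVE blocks are free space,
 * BLOCK_ACTIVE blocks hold a live translation, and BLOCK_USED blocks have
 * additionally been used since translation (the flag is set outside this
 * file), which makes them promotion candidates under the generational cache.
 */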
xlat_cache_block_t xlat_new_cache;
xlat_cache_block_t xlat_new_cache_ptr;
xlat_cache_block_t xlat_new_create_ptr;

#ifdef XLAT_GENERATIONAL_CACHE
xlat_cache_block_t xlat_temp_cache;
xlat_cache_block_t xlat_temp_cache_ptr;
xlat_cache_block_t xlat_old_cache;
xlat_cache_block_t xlat_old_cache_ptr;
#endif

static void **xlat_lut[XLAT_LUT_PAGES];
static gboolean xlat_initialized = FALSE;
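
/**
 * Initialize the translation cache: allocate the executable cache regions
 * on first call, then reset everything to the empty state. Safe to call
 * more than once.
 */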
void xlat_cache_init(void)
{
    if( !xlat_initialized ) {
        xlat_initialized = TRUE;
        xlat_new_cache = (xlat_cache_block_t)mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_new_cache_ptr = xlat_new_cache;
        xlat_new_create_ptr = xlat_new_cache;
#ifdef XLAT_GENERATIONAL_CACHE
        xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_temp_cache_ptr = xlat_temp_cache;
        xlat_old_cache_ptr = xlat_old_cache;
#endif
//        xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
//                MAP_PRIVATE|MAP_ANON, -1, 0);
        memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
    }
    xlat_flush_cache();
}
/**
 * Reset the cache structure to its default state
 */
void xlat_flush_cache()
{
    xlat_cache_block_t tmp;
    int i;
    xlat_new_cache_ptr = xlat_new_cache;
    xlat_new_cache_ptr->active = 0;
    xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_new_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_temp_cache_ptr->active = 0;
    xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_temp_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_old_cache_ptr->active = 0;
    xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_old_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#endif
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        if( xlat_lut[i] != NULL ) {
            memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
        }
    }
}
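
/**
 * Delete a single block: mark it inactive, point its LUT entry at the next
 * block in the chain, and unlink any blocks that branch directly into it.
 */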
void xlat_delete_block( xlat_cache_block_t block )
{
    block->active = 0;
    *block->lut_entry = block->chain;
    sh4_translate_unlink_block( block->use_list );
}
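
/**
 * Delete every block reachable from the given LUT page, following each
 * entry-point chain to the end, then clear all entries in the page.
 */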
static void xlat_flush_page_by_lut( void **page )
{
    int i;
    for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
        if( IS_ENTRY_POINT(page[i]) ) {
            void *p = XLAT_CODE_ADDR(page[i]);
            do {
                xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
                xlat_delete_block(block);
                p = block->chain;
            } while( p != NULL );
        }
        page[i] = NULL;
    }
}
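
/**
 * Invalidate any translated code affected by a 16-bit write to addr. If the
 * first entry of the page is a continuation, the final block of the previous
 * page spills over (a branch delay-slot instruction), so that page must be
 * flushed as well.
 */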
void FASTCALL xlat_invalidate_word( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( entry == 0 && IS_ENTRY_CONTINUATION(page[entry]) ) {
            /* First entry may be a delay-slot for the previous page */
            xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(addr-2)]);
        }
        if( page[entry] != NULL ) {
            xlat_flush_page_by_lut(page);
        }
    }
}
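
/**
 * As for xlat_invalidate_word, but for a 32-bit write, which covers two
 * adjacent word entries in the LUT.
 */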
void FASTCALL xlat_invalidate_long( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( entry == 0 && IS_ENTRY_CONTINUATION(page[entry]) ) {
            /* First entry may be a delay-slot for the previous page */
            xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(addr-2)]);
        }
        /* A longword write covers two adjacent word entries; addr is assumed
         * 4-byte aligned, so both lie within this page. Checking the entries
         * individually is correct on both 32- and 64-bit hosts. */
        if( page[entry] != NULL || page[entry+1] != NULL ) {
            xlat_flush_page_by_lut(page);
        }
    }
}
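
/**
 * Invalidate all translated code in the region [address, address+size),
 * flushing each LUT page that contains at least one affected entry.
 */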
void FASTCALL xlat_invalidate_block( sh4addr_t address, size_t size )
{
    int i;
    int entry_count = size >> 1; // words
    uint32_t page_no = XLAT_LUT_PAGE(address);
    int entry = XLAT_LUT_ENTRY(address);

    if( entry == 0 && xlat_lut[page_no] != NULL && IS_ENTRY_CONTINUATION(xlat_lut[page_no][entry])) {
        /* First entry may be a delay-slot for the previous page */
        xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(address-2)]);
    }
    do {
        void **page = xlat_lut[page_no];
        int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
        if( entry_count < page_entries ) {
            page_entries = entry_count;
        }
        if( page != NULL ) {
            if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
                /* Overwriting the entire page anyway */
                xlat_flush_page_by_lut(page);
            } else {
                for( i=entry; i<entry+page_entries; i++ ) {
                    if( page[i] != NULL ) {
                        xlat_flush_page_by_lut(page);
                        break;
                    }
                }
            }
        }
        page_no ++;
        entry_count -= page_entries;
        entry = 0;
    } while( entry_count > 0 );
}
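
/**
 * Invalidate all translated code in the page containing address.
 */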
void FASTCALL xlat_flush_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        xlat_flush_page_by_lut(page);
    }
}
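
/**
 * Return the native code entry point for the given SH4 address, or NULL if
 * no translation exists.
 */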
void * FASTCALL xlat_get_code( sh4addr_t address )
{
    void *result = NULL;
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        result = XLAT_CODE_ADDR(page[XLAT_LUT_ENTRY(address)]);
    }
    return result;
}
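
/**
 * Return the last recovery record in the block whose xlat_offset is strictly
 * before native_pc, i.e. the recovery state in effect at that point in the
 * generated code. Returns NULL if code is NULL.
 */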
xlat_recovery_record_t xlat_get_pre_recovery( void *code, void *native_pc )
{
    if( code != NULL ) {
        uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
        xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code);
        uint32_t count = block->recover_table_size;
        xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
        uint32_t posn;
        for( posn = 1; posn < count; posn++ ) {
            if( records[posn].xlat_offset >= pc_offset ) {
                return &records[posn-1];
            }
        }
        return &records[count-1];
    }
    return NULL;
}
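
/**
 * Return the LUT page for the given address, allocating and zeroing it on
 * first use.
 */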
static void **xlat_get_lut_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];

    /* Add the LUT entry for the block */
    if( page == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] = page =
            (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( page, 0, XLAT_LUT_PAGE_SIZE );
    }

    return page;
}
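
/**
 * Return a pointer to the LUT entry for the given address, allocating the
 * containing page if necessary.
 */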
void ** FASTCALL xlat_get_lut_entry( sh4addr_t address )
{
    void **page = xlat_get_lut_page(address);
    return &page[XLAT_LUT_ENTRY(address)];
}
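
/**
 * Return the total data size of the block containing the given code pointer.
 */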
uint32_t FASTCALL xlat_get_block_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    return xlt->size;
}
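
/**
 * Return the size of the code portion of the block, excluding the recovery
 * table when one is present.
 */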
uint32_t FASTCALL xlat_get_code_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    if( xlt->recover_table_offset == 0 ) {
        return xlt->size;
    } else {
        return xlt->recover_table_offset;
    }
}
/**
 * Cut the specified block so that it has the given size, with the remaining data
 * forming a new free block. If the free block would be less than the minimum size,
 * the cut is not performed.
 * @return the next block after the (possibly cut) block.
 */
static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
{
    cutsize = (cutsize + 3) & 0xFFFFFFFC; // force 4-byte alignment
    assert( cutsize <= block->size );
    if( block->size > cutsize + MIN_TOTAL_SIZE ) {
        int oldsize = block->size;
        block->size = cutsize;
        xlat_cache_block_t next = NEXT(block);
        next->active = 0;
        next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
        return next;
    } else {
        return NEXT(block);
    }
}
#ifdef XLAT_GENERATIONAL_CACHE
/**
 * Promote a block in temp space (or elsewhere for that matter) to old space.
 *
 * @param block to promote.
 */
static void xlat_promote_to_old_space( xlat_cache_block_t block )
{
    int allocation = (int)-sizeof(struct xlat_cache_block);
    int size = block->size;
    xlat_cache_block_t curr = xlat_old_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_old_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_old_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_old_cache_ptr->size == 0 ) {
        xlat_old_cache_ptr = xlat_old_cache;
    }
}
/**
 * Similarly to the above method, promotes a block to temp space.
 * TODO: Try to combine these - they're nearly identical
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    int size = block->size;
    int allocation = (int)-sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = xlat_temp_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        if( curr->active == BLOCK_USED ) {
            xlat_promote_to_old_space( curr );
        } else if( curr->active == BLOCK_ACTIVE ) {
            // Active but not used, release block
            *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03);
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_temp_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_temp_cache_ptr->size == 0 ) {
        xlat_temp_cache_ptr = xlat_temp_cache;
    }
}
#else
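/* Without the generational cache, evicting a block from new space simply
 * deletes it outright. */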
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    *block->lut_entry = block->chain;
    xlat_delete_block(block);
}
#endif
/**
 * Returns the next block in the new cache list that can be written to by the
 * translator. If the next block is active, it is evicted first.
 */
xlat_cache_block_t xlat_start_block( sh4addr_t address )
{
    if( xlat_new_cache_ptr->size == 0 ) {
        xlat_new_cache_ptr = xlat_new_cache;
    }

    if( xlat_new_cache_ptr->active ) {
        xlat_promote_to_temp_space( xlat_new_cache_ptr );
    }
    xlat_new_create_ptr = xlat_new_cache_ptr;
    xlat_new_create_ptr->active = 1;
    xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);

    /* Add the LUT entry for the block */
    void **p = xlat_get_lut_entry(address);
    void *entry = *p;
    if( IS_ENTRY_POINT(entry) ) {
        xlat_cache_block_t oldblock = XLAT_BLOCK_FOR_LUT_ENTRY(entry);
        assert( oldblock->active );
        xlat_new_create_ptr->chain = XLAT_CODE_ADDR(entry);
    } else {
        xlat_new_create_ptr->chain = NULL;
    }
    xlat_new_create_ptr->use_list = NULL;

    *p = &xlat_new_create_ptr->code;
    if( IS_ENTRY_CONTINUATION(entry) ) {
        /* Preserve the continuation flag from the old entry */
        *((uintptr_t *)p) |= (uintptr_t)XLAT_LUT_ENTRY_USED;
    }
    xlat_new_create_ptr->lut_entry = p;

    return xlat_new_create_ptr;
}
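
/**
 * Grow the block currently under construction to at least newSize bytes,
 * merging in the following free or evicted blocks, or migrating the block
 * to the front of the cache when the end of the region is reached.
 */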
xlat_cache_block_t xlat_extend_block( uint32_t newSize )
{
    assert( xlat_new_create_ptr->use_list == NULL );
    while( xlat_new_create_ptr->size < newSize ) {
        if( xlat_new_cache_ptr->size == 0 ) {
            /* Migrate to the front of the cache to keep it contiguous */
            xlat_new_create_ptr->active = 0;
            sh4ptr_t olddata = xlat_new_create_ptr->code;
            int oldsize = xlat_new_create_ptr->size;
            int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
            void **lut_entry = xlat_new_create_ptr->lut_entry;
            void *chain = xlat_new_create_ptr->chain;
            int allocation = (int)-sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = xlat_new_cache;
            do {
                if( xlat_new_cache_ptr->active ) {
                    xlat_promote_to_temp_space( xlat_new_cache_ptr );
                }
                allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
                xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
            } while( allocation < size );
            xlat_new_create_ptr = xlat_new_cache;
            xlat_new_create_ptr->active = 1;
            xlat_new_create_ptr->size = allocation;
            xlat_new_create_ptr->lut_entry = lut_entry;
            xlat_new_create_ptr->chain = chain;
            xlat_new_create_ptr->use_list = NULL;
            *lut_entry = &xlat_new_create_ptr->code;
            memmove( xlat_new_create_ptr->code, olddata, oldsize );
        } else {
            if( xlat_new_cache_ptr->active ) {
                xlat_promote_to_temp_space( xlat_new_cache_ptr );
            }
            xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
        }
    }
    return xlat_new_create_ptr;
}
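
/**
 * Commit the block currently being created: mark every LUT entry covered by
 * (startpc, endpc) as a continuation (re-fetching the LUT page whenever the
 * PC crosses a page boundary), then trim the block to its final size.
 */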
void xlat_commit_block( uint32_t destsize, sh4addr_t startpc, sh4addr_t endpc )
{
    void **entry = xlat_get_lut_entry(startpc+2);
    /* assume main entry has already been set at this point */

    for( sh4addr_t pc = startpc+2; pc < endpc; pc += 2 ) {
        if( XLAT_LUT_ENTRY(pc) == 0 )
            entry = xlat_get_lut_entry(pc);
        *((uintptr_t *)entry) |= (uintptr_t)XLAT_LUT_ENTRY_USED;
        entry++;
    }

    xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
}
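
/**
 * Walk a cache region from its start to its end-of-cache sentinel, asserting
 * that the block headers chain consistently and that ptr lies on a block
 * boundary within the region.
 */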
void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
{
    int foundptr = 0;
    xlat_cache_block_t tail =
        (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));

    assert( tail->active == 1 );
    assert( tail->size == 0 );
    while( cache < tail ) {
        assert( cache->active >= 0 && cache->active <= 2 );
        assert( cache->size >= 0 && cache->size < size );
        if( cache == ptr ) {
            foundptr = 1;
        }
        cache = NEXT(cache);
    }
    assert( cache == tail );
    assert( foundptr == 1 || tail == ptr );
}
/**
 * Perform a reverse lookup to determine the SH4 address corresponding to
 * the start of the code block containing ptr. This is _slow_ - it does a
 * linear scan of the lookup table to find this.
 *
 * If the pointer cannot be found in any live block, returns -1 (as this
 * is not a legal PC)
 */
sh4addr_t xlat_get_address( unsigned char *ptr )
{
    int i,j;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( j=0; j<XLAT_LUT_PAGE_ENTRIES; j++ ) {
                void *entry = page[j];
                if( ((uintptr_t)entry) > (uintptr_t)XLAT_LUT_ENTRY_USED ) {
                    xlat_cache_block_t block = XLAT_BLOCK_FOR_LUT_ENTRY(entry);
                    if( ptr >= block->code && ptr < block->code + block->size) {
                        /* Found it */
                        return (i<<13) | (j<<1);
                    }
                }
            }
        }
    }
    return -1;
}
/**
 * Sanity check that the given pointer is at least contained in one of the
 * cache regions, and has a sane-ish size. We don't do a full region walk atm.
 */
gboolean xlat_is_code_pointer( void *p )
{
    char *region;
    uintptr_t region_size;

    xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
    if( (((char *)block) - (char *)xlat_new_cache) < XLAT_NEW_CACHE_SIZE ) {
        /* Pointer is in new cache */
        region = (char *)xlat_new_cache;
        region_size = XLAT_NEW_CACHE_SIZE;
    }
#ifdef XLAT_GENERATIONAL_CACHE
    else if( (((char *)block) - (char *)xlat_temp_cache) < XLAT_TEMP_CACHE_SIZE ) {
        /* Pointer is in temp cache */
        region = (char *)xlat_temp_cache;
        region_size = XLAT_TEMP_CACHE_SIZE;
    } else if( (((char *)block) - (char *)xlat_old_cache) < XLAT_OLD_CACHE_SIZE ) {
        /* Pointer is in old cache */
        region = (char *)xlat_old_cache;
        region_size = XLAT_OLD_CACHE_SIZE;
    }
#endif
    else {
        /* Not a valid cache pointer */
        return FALSE;
    }

    /* Make sure the whole block is in the region */
    if( (((char *)p) - region) >= region_size ||
            (((char *)(NEXT(block))) - region) >= region_size )
        return FALSE;
    return TRUE;
}
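
/**
 * Run the integrity check over every cache region.
 */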
void xlat_check_integrity( )
{
    xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
#endif
}
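
/**
 * Count the active blocks in the new-space cache.
 */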
unsigned int xlat_get_active_block_count()
{
    unsigned int count = 0;
    xlat_cache_block_t ptr = xlat_new_cache;
    while( ptr->size != 0 ) {
        if( ptr->active != 0 ) {
            count++;
        }
        ptr = NEXT(ptr);
    }
    return count;
}
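
/**
 * Fill blocks with up to size active blocks from the new-space cache.
 * @return the number of entries written.
 */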
unsigned int xlat_get_active_blocks( struct xlat_block_ref *blocks, unsigned int size )
{
    unsigned int count = 0;
    xlat_cache_block_t ptr = xlat_new_cache;
    while( ptr->size != 0 ) {
        if( ptr->active != 0 ) {
            blocks[count].block = ptr;
            blocks[count].pc = 0;
            count++;
        }
        if( count >= size )
            break;
        ptr = NEXT(ptr);
    }
    return count;
}
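
/**
 * Resolve the SH4 PC for each block in blocks by scanning the LUT, following
 * block chains so that chained (older) blocks for the same address are also
 * attributed to that PC.
 */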
static void xlat_get_block_pcs( struct xlat_block_ref *blocks, unsigned int size )
{
    unsigned i;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( unsigned j=0; j < XLAT_LUT_PAGE_ENTRIES; j++ ) {
                void *code = XLAT_CODE_ADDR(page[j]);
                if( code != NULL ) {
                    xlat_cache_block_t ptr = XLAT_BLOCK_FOR_CODE(code);
                    sh4addr_t pc = XLAT_ADDR_FROM_ENTRY(i,j);
                    for( unsigned k=0; k<size; k++ ) {
                        if( blocks[k].block == ptr ) {
                            blocks[k].pc = pc;
                            ptr = ptr->chain;
                            if( ptr == NULL )
                                break;
                            else {
                                ptr = XLAT_BLOCK_FOR_CODE(ptr);
                                k = 0;
                            }
                        }
                    }
                }
            }
        }
    }
}
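
/* qsort comparator: order block references by descending 'active' state */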
static int xlat_compare_active_field( const void *a, const void *b )
{
    const struct xlat_block_ref *ptra = (const struct xlat_block_ref *)a;
    const struct xlat_block_ref *ptrb = (const struct xlat_block_ref *)b;
    return ptrb->block->active - ptra->block->active;
}
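
/**
 * Copy up to topN active blocks into outblocks, sorted by descending
 * 'active' state, with their SH4 PCs resolved.
 * @return the number of entries written.
 */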
unsigned int xlat_get_cache_blocks_by_activity( xlat_block_ref_t outblocks, size_t topN )
{
    int count = xlat_get_active_block_count();

    struct xlat_block_ref blocks[count];
    xlat_get_active_blocks(blocks, count);
    xlat_get_block_pcs(blocks, count);
    qsort(blocks, count, sizeof(struct xlat_block_ref), xlat_compare_active_field);

    if( topN > count )
        topN = count;
    memcpy(outblocks, blocks, topN*sizeof(struct xlat_block_ref));
    return topN;
}