/**
 * Translation cache management. This part is architecture independent.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>

#include "dreamcast.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "xlat/xltcache.h"
#include "x86dasm/x86dasm.h"
#define XLAT_LUT_PAGE_BITS 12
#define XLAT_LUT_TOTAL_BITS 28
#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)

#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))
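
/*
 * The lookup table is two-level: the high bits of an SH4 address (addr>>13)
 * select one of the 64K lazily-allocated LUT pages, and bits 1..12 select
 * one of the 4096 entries within it. Each entry therefore covers one 16-bit
 * instruction word, and each page covers 8KB of SH4 address space.
 */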
#define XLAT_LUT_ENTRY_EMPTY (void *)0
#define XLAT_LUT_ENTRY_USED  (void *)1

#define XLAT_ADDR_FROM_ENTRY(pagenum,entrynum) ((((pagenum)&0xFFFF)<<13)|(((entrynum)<<1)&0x1FFE))

#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
#define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
#define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)
#define IS_ENTRY_CONTINUATION(ent) (((uintptr_t)ent) & ((uintptr_t)XLAT_LUT_ENTRY_USED))
#define IS_FIRST_ENTRY_IN_PAGE(addr) (((addr)&0x1FFE) == 0)
#define XLAT_CODE_ADDR(ent) ((void *)(((uintptr_t)ent) & (~((uintptr_t)0x03))))
#define XLAT_BLOCK_FOR_LUT_ENTRY(ent) XLAT_BLOCK_FOR_CODE(XLAT_CODE_ADDR(ent))
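
/*
 * LUT entries are tagged pointers: EMPTY (0) means no translation, USED (1)
 * means the word lies inside a translated block without starting one, and
 * anything larger is the native code address of a block entry point,
 * possibly with bit 0 set to mark it as also being a continuation of the
 * previous word's block (e.g. a branch delay slot). XLAT_CODE_ADDR masks
 * off the low two tag bits to recover the plain code pointer.
 */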
#define MIN_BLOCK_SIZE 32
#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)

#define BLOCK_INACTIVE 0
#define BLOCK_ACTIVE 1
#define BLOCK_USED 2
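
/*
 * Values of the per-block 'active' field: INACTIVE blocks are free space,
 * ACTIVE blocks hold live translated code, and USED marks blocks flagged as
 * recently executed (set outside this file), which the generational
 * promotion logic below prefers to keep.
 */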
xlat_cache_block_t xlat_new_cache;
xlat_cache_block_t xlat_new_cache_ptr;
xlat_cache_block_t xlat_new_create_ptr;

#ifdef XLAT_GENERATIONAL_CACHE
xlat_cache_block_t xlat_temp_cache;
xlat_cache_block_t xlat_temp_cache_ptr;
xlat_cache_block_t xlat_old_cache;
xlat_cache_block_t xlat_old_cache_ptr;
#endif

static void **xlat_lut[XLAT_LUT_PAGES];
static gboolean xlat_initialized = FALSE;
static xlat_target_fns_t xlat_target = NULL;
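
/*
 * Freshly translated code always goes into the 'new' cache, which is
 * written circularly. When XLAT_GENERATIONAL_CACHE is enabled, live blocks
 * about to be overwritten are copied into the 'temp' cache, and well-used
 * temp blocks into the 'old' cache, so long-lived translations survive the
 * churn of the new generation.
 */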
void xlat_cache_init(void)
{
    if( !xlat_initialized ) {
        xlat_initialized = TRUE;
        xlat_new_cache = (xlat_cache_block_t)mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_new_cache_ptr = xlat_new_cache;
        xlat_new_create_ptr = xlat_new_cache;
#ifdef XLAT_GENERATIONAL_CACHE
        xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_temp_cache_ptr = xlat_temp_cache;
        xlat_old_cache_ptr = xlat_old_cache;
#endif
//        xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
//                MAP_PRIVATE|MAP_ANON, -1, 0);
        memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
    }
    xlat_flush_cache();
}
void xlat_set_target_fns( xlat_target_fns_t target )
{
    xlat_target = target;
}
/**
 * Reset the cache structure to its default state
 */
void xlat_flush_cache()
{
    xlat_cache_block_t tmp;
    int i;
    /* Reset each cache to a single free block followed by a zero-size
     * active sentinel marking the end of the region. */
    xlat_new_cache_ptr = xlat_new_cache;
    xlat_new_cache_ptr->active = 0;
    xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_new_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_temp_cache_ptr->active = 0;
    xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_temp_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_old_cache_ptr->active = 0;
    xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_old_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#endif
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        if( xlat_lut[i] != NULL ) {
            memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
        }
    }
}
void xlat_delete_block( xlat_cache_block_t block )
{
    block->active = 0;
    *block->lut_entry = block->chain;
    if( block->use_list != NULL )
        xlat_target->unlink_block(block->use_list);
}
static void xlat_flush_page_by_lut( void **page )
{
    int i;
    for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
        if( IS_ENTRY_POINT(page[i]) ) {
            void *p = XLAT_CODE_ADDR(page[i]);
            do {
                xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
                xlat_delete_block(block);
                p = block->chain;
            } while( p != NULL );
        }
        page[i] = NULL;
    }
}
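
/**
 * Invalidate any translated code overlapping a 16-bit write to addr. Note
 * this flushes the entire 8KB LUT page containing the address rather than
 * tracking individual blocks, which keeps the common case (a write to a
 * page with no translations) cheap.
 */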
void FASTCALL xlat_invalidate_word( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( entry == 0 && IS_ENTRY_CONTINUATION(page[entry]) ) {
            /* First entry may be a delay-slot for the previous page */
            xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(addr-2)]);
        }
        if( page[entry] != NULL ) {
            xlat_flush_page_by_lut(page);
        }
    }
}
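
/**
 * As xlat_invalidate_word, but for a 32-bit write covering two LUT entries.
 * Note the single 64-bit load below tests both entries at once on hosts
 * with 32-bit pointers; with 64-bit pointers it only examines the first.
 */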
void FASTCALL xlat_invalidate_long( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( entry == 0 && IS_ENTRY_CONTINUATION(page[entry]) ) {
            /* First entry may be a delay-slot for the previous page */
            xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(addr-2)]);
        }
        if( *(uint64_t *)&page[entry] != 0 ) {
            xlat_flush_page_by_lut(page);
        }
    }
}
void FASTCALL xlat_invalidate_block( sh4addr_t address, size_t size )
{
    int i;
    int entry_count = size >> 1; /* size in 16-bit words */
    uint32_t page_no = XLAT_LUT_PAGE(address);
    int entry = XLAT_LUT_ENTRY(address);

    if( entry == 0 && xlat_lut[page_no] != NULL && IS_ENTRY_CONTINUATION(xlat_lut[page_no][entry])) {
        /* First entry may be a delay-slot for the previous page */
        xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(address-2)]);
    }
    do {
        void **page = xlat_lut[page_no];
        int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
        if( entry_count < page_entries ) {
            page_entries = entry_count;
        }
        if( page != NULL ) {
            if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
                /* Overwriting the entire page anyway */
                xlat_flush_page_by_lut(page);
            } else {
                for( i=entry; i<entry+page_entries; i++ ) {
                    if( page[i] != NULL ) {
                        xlat_flush_page_by_lut(page);
                        break;
                    }
                }
            }
        }
        entry_count -= page_entries;
        page_no ++;
        entry = 0;
    } while( entry_count > 0 );
}
void FASTCALL xlat_flush_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        xlat_flush_page_by_lut(page);
    }
}
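
/**
 * Return the native entry point translated for the given SH4 address, or
 * NULL if there is none. Illustrative use from an execution loop (the real
 * driver lives in sh4trans.c):
 *
 *     void *code = xlat_get_code( sh4r.pc );
 *     if( code == NULL )
 *         code = sh4_translate_basic_block( sh4r.pc );
 */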
void * FASTCALL xlat_get_code( sh4addr_t address )
{
    void *result = NULL;
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        result = XLAT_CODE_ADDR(page[XLAT_LUT_ENTRY(address)]);
    }
    return result;
}
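
/**
 * Find the recovery record that applies at native_pc within the translated
 * block beginning at code: the last record whose xlat_offset lies before
 * the PC's offset into the block. Used to reconstruct SH4 state when
 * execution is interrupted mid-block. Returns NULL if code is NULL.
 */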
xlat_recovery_record_t xlat_get_pre_recovery( void *code, void *native_pc )
{
    if( code != NULL ) {
        uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
        xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code);
        uint32_t count = block->recover_table_size;
        xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
        uint32_t posn;
        for( posn = 1; posn < count; posn++ ) {
            if( records[posn].xlat_offset >= pc_offset ) {
                return &records[posn-1];
            }
        }
        return &records[count-1];
    }
    return NULL;
}
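
/**
 * Return the LUT page covering the given address, allocating and zeroing
 * it on first use.
 */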
static void **xlat_get_lut_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];

    /* Allocate the page on first touch */
    if( page == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] = page =
            (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( page, 0, XLAT_LUT_PAGE_SIZE );
    }

    return page;
}
void ** FASTCALL xlat_get_lut_entry( sh4addr_t address )
{
    void **page = xlat_get_lut_page(address);
    return &page[XLAT_LUT_ENTRY(address)];
}
uint32_t FASTCALL xlat_get_block_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    return xlt->size;
}
/**
 * Return the size of the code portion of the block, excluding any recovery
 * table appended at the end.
 */
uint32_t FASTCALL xlat_get_code_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    if( xlt->recover_table_offset == 0 ) {
        return xlt->size;
    } else {
        return xlt->recover_table_offset;
    }
}
/**
 * Cut the specified block so that it has the given size, with the remaining data
 * forming a new free block. If the free block would be less than the minimum size,
 * the cut is not performed.
 * @return the next block after the (possibly cut) block.
 */
static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
{
    cutsize = (cutsize + 3) & 0xFFFFFFFC; // force 4-byte alignment
    assert( cutsize <= block->size );
    if( block->size > cutsize + MIN_TOTAL_SIZE ) {
        int oldsize = block->size;
        block->size = cutsize;
        xlat_cache_block_t next = NEXT(block);
        next->active = 0;
        next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
        return next;
    } else {
        return NEXT(block);
    }
}
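
/*
 * For example, cutting a 256-byte block down to 64 bytes leaves a free
 * block with 192 - sizeof(struct xlat_cache_block) bytes of payload; if
 * the leftover payload would not exceed MIN_BLOCK_SIZE, the block simply
 * keeps its over-allocation instead.
 */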
#ifdef XLAT_GENERATIONAL_CACHE
/**
 * Promote a block in temp space (or elsewhere for that matter) to old space.
 *
 * @param block to promote.
 */
static void xlat_promote_to_old_space( xlat_cache_block_t block )
{
    int allocation = (int)-sizeof(struct xlat_cache_block);
    int size = block->size;
    xlat_cache_block_t curr = xlat_old_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_old_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_old_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_old_cache_ptr->size == 0 ) {
        xlat_old_cache_ptr = xlat_old_cache;
    }
}
/**
 * Similarly to the above method, promotes a block to temp space.
 * TODO: Try to combine these - they're nearly identical
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    int size = block->size;
    int allocation = (int)-sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = xlat_temp_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        if( curr->active == BLOCK_USED ) {
            xlat_promote_to_old_space( curr );
        } else if( curr->active == BLOCK_ACTIVE ) {
            // Active but not used, release block
            *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03);
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_temp_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_temp_cache_ptr->size == 0 ) {
        xlat_temp_cache_ptr = xlat_temp_cache;
    }
}
#else
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    *block->lut_entry = block->chain;
    xlat_delete_block(block);
}
#endif
/**
 * Returns the next block in the new cache list that can be written to by the
 * translator. If the next block is active, it is evicted first.
 */
xlat_cache_block_t xlat_start_block( sh4addr_t address )
{
    if( xlat_new_cache_ptr->size == 0 ) {
        xlat_new_cache_ptr = xlat_new_cache;
    }

    if( xlat_new_cache_ptr->active ) {
        xlat_promote_to_temp_space( xlat_new_cache_ptr );
    }
    xlat_new_create_ptr = xlat_new_cache_ptr;
    xlat_new_create_ptr->active = 1;
    xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);

    /* Add the LUT entry for the block */
    void **p = xlat_get_lut_entry(address);
    void *entry = *p;
    if( IS_ENTRY_POINT(entry) ) {
        xlat_cache_block_t oldblock = XLAT_BLOCK_FOR_LUT_ENTRY(entry);
        assert( oldblock->active );
        xlat_new_create_ptr->chain = XLAT_CODE_ADDR(entry);
    } else {
        xlat_new_create_ptr->chain = NULL;
    }
    xlat_new_create_ptr->use_list = NULL;

    *p = &xlat_new_create_ptr->code;
    if( IS_ENTRY_CONTINUATION(entry) ) {
        *((uintptr_t *)p) |= (uintptr_t)XLAT_LUT_ENTRY_USED;
    }
    xlat_new_create_ptr->lut_entry = p;

    return xlat_new_create_ptr;
}
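
/**
 * Grow the block currently under construction so its payload is at least
 * newSize bytes, merging in the free blocks that follow it. When the end of
 * the cache is reached, the partial translation is relocated to the front
 * of the cache so that it stays contiguous.
 */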
xlat_cache_block_t xlat_extend_block( uint32_t newSize )
{
    assert( xlat_new_create_ptr->use_list == NULL );
    while( xlat_new_create_ptr->size < newSize ) {
        if( xlat_new_cache_ptr->size == 0 ) {
            /* Migrate to the front of the cache to keep it contiguous */
            xlat_new_create_ptr->active = 0;
            sh4ptr_t olddata = xlat_new_create_ptr->code;
            int oldsize = xlat_new_create_ptr->size;
            int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
            void **lut_entry = xlat_new_create_ptr->lut_entry;
            void *chain = xlat_new_create_ptr->chain;
            int allocation = (int)-sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = xlat_new_cache;
            do {
                if( xlat_new_cache_ptr->active ) {
                    xlat_promote_to_temp_space( xlat_new_cache_ptr );
                }
                allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
                xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
            } while( allocation < size );
            xlat_new_create_ptr = xlat_new_cache;
            xlat_new_create_ptr->active = 1;
            xlat_new_create_ptr->size = allocation;
            xlat_new_create_ptr->lut_entry = lut_entry;
            xlat_new_create_ptr->chain = chain;
            xlat_new_create_ptr->use_list = NULL;
            *lut_entry = &xlat_new_create_ptr->code;
            memmove( xlat_new_create_ptr->code, olddata, oldsize );
        } else {
            if( xlat_new_cache_ptr->active ) {
                xlat_promote_to_temp_space( xlat_new_cache_ptr );
            }
            xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
        }
    }
    return xlat_new_create_ptr;
}
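
/**
 * Finalize the block under construction: mark each remaining halfword of
 * the source range (startpc, endpc) as used in the LUT, so that writes
 * anywhere in that range invalidate the block, then trim the block to
 * destsize and return the remainder to the free pool.
 */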
void xlat_commit_block( uint32_t destsize, sh4addr_t startpc, sh4addr_t endpc )
{
    void **entry = xlat_get_lut_entry(startpc+2);
    /* assume main entry has already been set at this point */

    for( sh4addr_t pc = startpc+2; pc < endpc; pc += 2 ) {
        if( XLAT_LUT_ENTRY(pc) == 0 )
            entry = xlat_get_lut_entry(pc);
        *((uintptr_t *)entry) |= (uintptr_t)XLAT_LUT_ENTRY_USED;
        entry++;
    }

    xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
}
void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
{
    int foundptr = 0;
    xlat_cache_block_t tail =
        (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));

    assert( tail->active == 1 );
    assert( tail->size == 0 );
    while( cache < tail ) {
        assert( cache->active >= 0 && cache->active <= 2 );
        assert( cache->size >= 0 && cache->size < size );
        if( cache == ptr ) {
            foundptr = 1;
        }
        cache = NEXT(cache);
    }
    assert( cache == tail );
    assert( foundptr == 1 || tail == ptr );
}
/**
 * Perform a reverse lookup to determine the SH4 address corresponding to
 * the start of the code block containing ptr. This is _slow_ - it does a
 * linear scan of the lookup table to find this.
 *
 * If the pointer cannot be found in any live block, returns -1 (as this
 * is not a legal PC)
 */
sh4addr_t xlat_get_address( unsigned char *ptr )
{
    int i, j;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( j=0; j<XLAT_LUT_PAGE_ENTRIES; j++ ) {
                void *entry = page[j];
                if( ((uintptr_t)entry) > (uintptr_t)XLAT_LUT_ENTRY_USED ) {
                    xlat_cache_block_t block = XLAT_BLOCK_FOR_LUT_ENTRY(entry);
                    if( ptr >= block->code && ptr < block->code + block->size) {
                        /* Found it */
                        return (i<<13) | (j<<1);
                    }
                }
            }
        }
    }
    return -1;
}
/**
 * Sanity check that the given pointer is at least contained in one of the
 * cache regions, and has a sane-ish size. We don't do a full region walk atm.
 */
gboolean xlat_is_code_pointer( void *p )
{
    char *region;
    uintptr_t region_size;

    xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
    if( (((char *)block) - (char *)xlat_new_cache) < XLAT_NEW_CACHE_SIZE ) {
        /* Pointer is in new cache */
        region = (char *)xlat_new_cache;
        region_size = XLAT_NEW_CACHE_SIZE;
    }
#ifdef XLAT_GENERATIONAL_CACHE
    else if( (((char *)block) - (char *)xlat_temp_cache) < XLAT_TEMP_CACHE_SIZE ) {
        /* Pointer is in temp cache */
        region = (char *)xlat_temp_cache;
        region_size = XLAT_TEMP_CACHE_SIZE;
    } else if( (((char *)block) - (char *)xlat_old_cache) < XLAT_OLD_CACHE_SIZE ) {
        /* Pointer is in old cache */
        region = (char *)xlat_old_cache;
        region_size = XLAT_OLD_CACHE_SIZE;
    }
#endif
    else {
        /* Not a valid cache pointer */
        return FALSE;
    }

    /* Make sure the whole block is in the region */
    if( (((char *)p) - region) >= region_size ||
            (((char *)(NEXT(block))) - region) >= region_size )
        return FALSE;
    return TRUE;
}
void xlat_check_integrity( )
{
    xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
#endif
}
unsigned int xlat_get_active_block_count()
{
    unsigned int count = 0;
    xlat_cache_block_t ptr = xlat_new_cache;
    while( ptr->size != 0 ) {
        if( ptr->active != 0 ) {
            count++;
        }
        ptr = NEXT(ptr);
    }
    return count;
}
unsigned int xlat_get_active_blocks( struct xlat_block_ref *blocks, unsigned int size )
{
    unsigned int count = 0;
    xlat_cache_block_t ptr = xlat_new_cache;
    while( ptr->size != 0 ) {
        if( ptr->active != 0 ) {
            blocks[count].block = ptr;
            blocks[count].pc = 0;
            count++;
        }
        if( count >= size )
            break;
        ptr = NEXT(ptr);
    }
    return count;
}
static void xlat_get_block_pcs( struct xlat_block_ref *blocks, unsigned int size )
{
    unsigned i;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( unsigned j=0; j < XLAT_LUT_PAGE_ENTRIES; j++ ) {
                void *code = XLAT_CODE_ADDR(page[j]);
                if( code != NULL ) {
                    /* Walk the chain of blocks translated at this address */
                    xlat_cache_block_t ptr = XLAT_BLOCK_FOR_CODE(code);
                    sh4addr_t pc = XLAT_ADDR_FROM_ENTRY(i,j);
                    while( ptr != NULL ) {
                        for( unsigned k=0; k<size; k++ ) {
                            if( blocks[k].block == ptr ) {
                                blocks[k].pc = pc;
                                break;
                            }
                        }
                        ptr = (ptr->chain == NULL ? NULL : XLAT_BLOCK_FOR_CODE(ptr->chain));
                    }
                }
            }
        }
    }
}
static int xlat_compare_active_field( const void *a, const void *b )
{
    const struct xlat_block_ref *ptra = (const struct xlat_block_ref *)a;
    const struct xlat_block_ref *ptrb = (const struct xlat_block_ref *)b;
    return ptrb->block->active - ptra->block->active;
}
unsigned int xlat_get_cache_blocks_by_activity( xlat_block_ref_t outblocks, size_t topN )
{
    int count = xlat_get_active_block_count();

    struct xlat_block_ref blocks[count];
    xlat_get_active_blocks(blocks, count);
    xlat_get_block_pcs(blocks,count);
    qsort(blocks, count, sizeof(struct xlat_block_ref), xlat_compare_active_field);

    if( topN > (size_t)count )
        topN = count;
    memcpy(outblocks, blocks, topN*sizeof(struct xlat_block_ref));
    return topN;
}