/**
 * Translation cache management. This part is architecture independent.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>

#include "dreamcast.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "xlat/xltcache.h"
#define XLAT_LUT_PAGE_BITS 12
#define XLAT_LUT_TOTAL_BITS 28
#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)

#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))

#define XLAT_LUT_ENTRY_EMPTY (void *)0
#define XLAT_LUT_ENTRY_USED  (void *)1

#define XLAT_ADDR_FROM_ENTRY(pagenum,entrynum) ((((pagenum)&0xFFFF)<<13)|(((entrynum)<<1)&0x1FFE))

#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
#define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
#define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)
#define IS_ENTRY_CONTINUATION(ent) (((uintptr_t)ent) & ((uintptr_t)XLAT_LUT_ENTRY_USED))
#define IS_FIRST_ENTRY_IN_PAGE(addr) (((addr)&0x1FFE) == 0)
#define XLAT_CODE_ADDR(ent) ((void *)(((uintptr_t)ent) & (~((uintptr_t)0x03))))
#define XLAT_BLOCK_FOR_LUT_ENTRY(ent) XLAT_BLOCK_FOR_CODE(XLAT_CODE_ADDR(ent))
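
/* A LUT entry packs a code pointer and flag bits into one word: translated
 * blocks are at least 4-byte aligned, so the low two bits are free for tags.
 * Bit 0 (XLAT_LUT_ENTRY_USED) marks a word that lies inside some translated
 * block; any entry whose value is above XLAT_LUT_ENTRY_USED is also a block
 * entry point, and XLAT_CODE_ADDR() masks the tag bits off to recover the
 * code address.
 *
 * Worked example (illustrative): for the SH4 address 0x8C0100A6,
 * XLAT_LUT_PAGE gives (0x8C0100A6>>13)&0xFFFF = 0x6008 and XLAT_LUT_ENTRY
 * gives (0x00A6&0x1FFE)>>1 = 0x53, i.e. page 0x6008, entry 0x53. Each LUT
 * page thus covers 8KB of SH4 address space, one entry per 16-bit
 * instruction word.
 */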

#define MIN_BLOCK_SIZE 32
#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)

#define BLOCK_INACTIVE 0
#define BLOCK_ACTIVE 1
#define BLOCK_USED 2
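
/* Block lifecycle, as used by the promotion code below: BLOCK_INACTIVE marks
 * free space, BLOCK_ACTIVE a block that is live in the lookup table, and
 * BLOCK_USED (it appears) a block that has actually been executed, making it
 * worth promoting rather than discarding when its cache region is recycled.
 */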

xlat_cache_block_t xlat_new_cache;
xlat_cache_block_t xlat_new_cache_ptr;
xlat_cache_block_t xlat_new_create_ptr;

#ifdef XLAT_GENERATIONAL_CACHE
xlat_cache_block_t xlat_temp_cache;
xlat_cache_block_t xlat_temp_cache_ptr;
xlat_cache_block_t xlat_old_cache;
xlat_cache_block_t xlat_old_cache_ptr;
#endif

static void **xlat_lut[XLAT_LUT_PAGES];
static gboolean xlat_initialized = FALSE;
static xlat_target_fns_t xlat_target = NULL;
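
/* Cache layout: each region is a linear arena of variable-sized blocks, each
 * preceded by a struct xlat_cache_block header. With XLAT_GENERATIONAL_CACHE
 * enabled there are three generations - new, temp and old - and blocks are
 * copied up a generation when their space is reclaimed; otherwise only the
 * new cache exists and evicted blocks are simply deleted. The regions are
 * mapped read/write/execute so generated code can run in place; a strict
 * W^X platform would need a different mapping strategy (an assumption, not
 * something this file addresses).
 */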
void xlat_cache_init(void)
{
    if( !xlat_initialized ) {
        xlat_initialized = TRUE;
        xlat_new_cache = (xlat_cache_block_t)mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_new_cache_ptr = xlat_new_cache;
        xlat_new_create_ptr = xlat_new_cache;
#ifdef XLAT_GENERATIONAL_CACHE
        xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_temp_cache_ptr = xlat_temp_cache;
        xlat_old_cache_ptr = xlat_old_cache;
#endif
//        xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
//                MAP_PRIVATE|MAP_ANON, -1, 0);
        memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
    }
    xlat_flush_cache();
}

void xlat_set_target_fns( xlat_target_fns_t target )
{
    xlat_target = target;
}
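
/* Free space within each cache is tracked by the headers themselves: a free
 * run is a single inactive block, and the end of each region holds a
 * sentinel header with active=1 and size=0 so that scans along NEXT() have a
 * definite stopping point. xlat_flush_cache() below re-establishes exactly
 * this shape: one maximal free block followed by the sentinel.
 */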
/**
 * Reset the cache structure to its default state
 */
void xlat_flush_cache()
{
    xlat_cache_block_t tmp;
    int i;

    xlat_new_cache_ptr = xlat_new_cache;
    xlat_new_cache_ptr->active = 0;
    xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_new_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_temp_cache_ptr->active = 0;
    xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_temp_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_old_cache_ptr->active = 0;
    xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_old_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#endif
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        if( xlat_lut[i] != NULL ) {
            memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
        }
    }
}

void xlat_delete_block( xlat_cache_block_t block )
{
    block->active = 0;
    *block->lut_entry = block->chain;
    if( block->use_list != NULL )
        xlat_target->unlink_block(block->use_list);
}
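
/* Deleting a block stores its chain pointer back into the LUT entry, so any
 * older translation of the same address becomes reachable again;
 * xlat_flush_page_by_lut() below relies on this when it walks and deletes an
 * entire chain of translations for one entry point.
 */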
static void xlat_flush_page_by_lut( void **page )
{
    int i;
    for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
        if( IS_ENTRY_POINT(page[i]) ) {
            void *p = XLAT_CODE_ADDR(page[i]);
            do {
                xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
                xlat_delete_block(block);
                p = block->chain;
            } while( p != NULL );
        }
        page[i] = NULL;
    }
}

void FASTCALL xlat_invalidate_word( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( entry == 0 && IS_ENTRY_CONTINUATION(page[entry]) ) {
            /* First entry may be a delay-slot for the previous page */
            xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(addr-2)]);
        }
        if( page[entry] != NULL ) {
            xlat_flush_page_by_lut(page);
        }
    }
}

void FASTCALL xlat_invalidate_long( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( entry == 0 && IS_ENTRY_CONTINUATION(page[entry]) ) {
            /* First entry may be a delay-slot for the previous page */
            xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(addr-2)]);
        }
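        /* A long write spans two word entries; reading them as a single
         * uint64_t tests both at once when pointers are 32-bit. With 64-bit
         * pointers this only examines the first entry - presumably
         * acceptable, on the assumption that a block covering the second
         * word normally covers the first as well.
         */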
        if( *(uint64_t *)&page[entry] != 0 ) {
            xlat_flush_page_by_lut(page);
        }
    }
}

void FASTCALL xlat_invalidate_block( sh4addr_t address, size_t size )
{
    int i;
    int entry_count = size >> 1; // words
    uint32_t page_no = XLAT_LUT_PAGE(address);
    int entry = XLAT_LUT_ENTRY(address);

    if( entry == 0 && xlat_lut[page_no] != NULL && IS_ENTRY_CONTINUATION(xlat_lut[page_no][entry])) {
        /* First entry may be a delay-slot for the previous page */
        xlat_flush_page_by_lut(xlat_lut[XLAT_LUT_PAGE(address-2)]);
    }
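    /* Walk the region one LUT page at a time; a page is flushed if any of
     * the words being invalidated has a live entry in it.
     */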
    do {
        void **page = xlat_lut[page_no];
        int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
        if( entry_count < page_entries ) {
            page_entries = entry_count;
        }
        if( page != NULL ) {
            if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
                /* Overwriting the entire page anyway */
                xlat_flush_page_by_lut(page);
            } else {
                for( i=entry; i<entry+page_entries; i++ ) {
                    if( page[i] != NULL ) {
                        xlat_flush_page_by_lut(page);
                        break;
                    }
                }
            }
            entry_count -= page_entries;
        } else {
            /* Page not translated at all - nothing to flush */
            entry_count -= page_entries;
        }
        page_no ++;
        entry = 0;
    } while( entry_count > 0 );
}

void FASTCALL xlat_flush_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        xlat_flush_page_by_lut(page);
    }
}
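
/* Look up the translated entry point for an SH4 address. Returns the code
 * pointer with the LUT tag bits masked off, or NULL when the address has no
 * current translation (empty or continuation-only entry).
 */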
void * FASTCALL xlat_get_code( sh4addr_t address )
{
    void *result = NULL;
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        result = XLAT_CODE_ADDR(page[XLAT_LUT_ENTRY(address)]);
    }
    return result;
}
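
/* Recovery records map offsets in the generated code back to SH4 state, and
 * are assumed here to be sorted by xlat_offset. The search below returns the
 * last record strictly before native_pc, i.e. the state in force when that
 * native instruction started executing; NULL is returned when code is NULL.
 */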
xlat_recovery_record_t xlat_get_pre_recovery( void *code, void *native_pc )
{
    if( code != NULL ) {
        uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
        xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code);
        uint32_t count = block->recover_table_size;
        xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
        uint32_t posn;
        for( posn = 1; posn < count; posn++ ) {
            if( records[posn].xlat_offset >= pc_offset ) {
                return &records[posn-1];
            }
        }
        return &records[count-1];
    }
    return NULL;
}

static void **xlat_get_lut_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];

    /* Allocate the LUT page on first use */
    if( page == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] = page =
            (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( page, 0, XLAT_LUT_PAGE_SIZE );
    }

    return page;
}

void ** FASTCALL xlat_get_lut_entry( sh4addr_t address )
{
    void **page = xlat_get_lut_page(address);
    return &page[XLAT_LUT_ENTRY(address)];
}

uint32_t FASTCALL xlat_get_block_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    return xlt->size;
}

uint32_t FASTCALL xlat_get_code_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    if( xlt->recover_table_offset == 0 ) {
        return xlt->size;
    } else {
        return xlt->recover_table_offset;
    }
}

/**
 * Cut the specified block so that it has the given size, with the remaining data
 * forming a new free block. If the free block would be less than the minimum size,
 * the cut is not performed.
 * @return the next block after the (possibly cut) block.
 */
static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
{
    cutsize = (cutsize + 3) & 0xFFFFFFFC; // force 4-byte alignment
    assert( cutsize <= block->size );
    if( block->size > cutsize + MIN_TOTAL_SIZE ) {
        int oldsize = block->size;
        block->size = cutsize;
        xlat_cache_block_t next = NEXT(block);
        next->active = 0;
        next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
        return next;
    } else {
        return NEXT(block);
    }
}

#ifdef XLAT_GENERATIONAL_CACHE
/**
 * Promote a block in temp space (or elsewhere for that matter) to old space.
 *
 * @param block the block to promote.
 */
static void xlat_promote_to_old_space( xlat_cache_block_t block )
{
    int allocation = (int)-sizeof(struct xlat_cache_block);
    int size = block->size;
    xlat_cache_block_t curr = xlat_old_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_old_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_old_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_old_cache_ptr->size == 0 ) {
        xlat_old_cache_ptr = xlat_old_cache;
    }
}
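
/* Eviction policy, as far as the code shows: blocks pushed out of the temp
 * cache are promoted to old space only if they have actually been executed
 * (BLOCK_USED); blocks that were merely active get their LUT entry stripped
 * back to the bare tag bits, which drops the translation while keeping the
 * "this word is translated" marker intact.
 */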
/**
 * Similarly to the above method, promotes a block to temp space.
 * TODO: Try to combine these - they're nearly identical
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    int size = block->size;
    int allocation = (int)-sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = xlat_temp_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        if( curr->active == BLOCK_USED ) {
            xlat_promote_to_old_space( curr );
        } else if( curr->active == BLOCK_ACTIVE ) {
            // Active but not used, release block
            *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03);
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_temp_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_temp_cache_ptr->size == 0 ) {
        xlat_temp_cache_ptr = xlat_temp_cache;
    }
}

#else /* !XLAT_GENERATIONAL_CACHE */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    *block->lut_entry = block->chain;
    xlat_delete_block(block);
}
#endif

/**
 * Returns the next block in the new cache list that can be written to by the
 * translator. If the next block is active, it is evicted first.
 */
xlat_cache_block_t xlat_start_block( sh4addr_t address )
{
    if( xlat_new_cache_ptr->size == 0 ) {
        xlat_new_cache_ptr = xlat_new_cache;
    }

    if( xlat_new_cache_ptr->active ) {
        xlat_promote_to_temp_space( xlat_new_cache_ptr );
    }
    xlat_new_create_ptr = xlat_new_cache_ptr;
    xlat_new_create_ptr->active = 1;
    xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);

    /* Add the LUT entry for the block */
    void **p = xlat_get_lut_entry(address);
    void *entry = *p;
    if( IS_ENTRY_POINT(entry) ) {
        xlat_cache_block_t oldblock = XLAT_BLOCK_FOR_LUT_ENTRY(entry);
        assert( oldblock->active );
        xlat_new_create_ptr->chain = XLAT_CODE_ADDR(entry);
    } else {
        xlat_new_create_ptr->chain = NULL;
    }
    xlat_new_create_ptr->use_list = NULL;

    *p = &xlat_new_create_ptr->code;
    if( IS_ENTRY_CONTINUATION(entry) ) {
        *((uintptr_t *)p) |= (uintptr_t)XLAT_LUT_ENTRY_USED;
    }
    xlat_new_create_ptr->lut_entry = p;

    return xlat_new_create_ptr;
}
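
/* Typical translator flow, as suggested by this API (illustrative sketch,
 * not code from this file):
 *
 *     xlat_cache_block_t blk = xlat_start_block( pc );
 *     // ... emit native code into blk->code, calling
 *     // blk = xlat_extend_block( needed_size ); whenever it may overflow
 *     xlat_commit_block( final_size, pc, end_pc );
 *
 * xlat_extend_block() may move the block, so the emitter must re-fetch its
 * output pointer from the returned block after every call.
 */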

xlat_cache_block_t xlat_extend_block( uint32_t newSize )
{
    assert( xlat_new_create_ptr->use_list == NULL );
    while( xlat_new_create_ptr->size < newSize ) {
        if( xlat_new_cache_ptr->size == 0 ) {
            /* Migrate to the front of the cache to keep it contiguous */
            xlat_new_create_ptr->active = 0;
            sh4ptr_t olddata = xlat_new_create_ptr->code;
            int oldsize = xlat_new_create_ptr->size;
            int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
            void **lut_entry = xlat_new_create_ptr->lut_entry;
            void *chain = xlat_new_create_ptr->chain;
            int allocation = (int)-sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = xlat_new_cache;
            do {
                if( xlat_new_cache_ptr->active ) {
                    xlat_promote_to_temp_space( xlat_new_cache_ptr );
                }
                allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
                xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
            } while( allocation < size );
            xlat_new_create_ptr = xlat_new_cache;
            xlat_new_create_ptr->active = 1;
            xlat_new_create_ptr->size = allocation;
            xlat_new_create_ptr->lut_entry = lut_entry;
            xlat_new_create_ptr->chain = chain;
            xlat_new_create_ptr->use_list = NULL;
            *lut_entry = &xlat_new_create_ptr->code;
            memmove( xlat_new_create_ptr->code, olddata, oldsize );
        } else {
            if( xlat_new_cache_ptr->active ) {
                xlat_promote_to_temp_space( xlat_new_cache_ptr );
            }
            xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
        }
    }
    return xlat_new_create_ptr;
}

void xlat_commit_block( uint32_t destsize, sh4addr_t startpc, sh4addr_t endpc )
{
    void **entry = xlat_get_lut_entry(startpc+2);
    /* assume main entry has already been set at this point */
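
    /* Mark every remaining word of the source range as a continuation of
     * this block, so a write anywhere inside it triggers invalidation; the
     * entry pointer is re-fetched whenever the range crosses into a new LUT
     * page.
     */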
    for( sh4addr_t pc = startpc+2; pc < endpc; pc += 2 ) {
        if( XLAT_LUT_ENTRY(pc) == 0 )
            entry = xlat_get_lut_entry(pc);
        *((uintptr_t *)entry) |= (uintptr_t)XLAT_LUT_ENTRY_USED;
        entry++;
    }

    xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
}

void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
{
    int foundptr = 0;
    xlat_cache_block_t tail =
        (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));

    assert( tail->active == 1 );
    assert( tail->size == 0 );
    while( cache < tail ) {
        if( cache == ptr ) {
            foundptr = 1;
        }
        assert( cache->active >= 0 && cache->active <= 2 );
        assert( cache->size >= 0 && cache->size < size );
        cache = NEXT(cache);
    }
    assert( cache == tail );
    assert( foundptr == 1 || tail == ptr );
}

/**
 * Perform a reverse lookup to determine the SH4 address corresponding to
 * the start of the code block containing ptr. This is _slow_ - it does a
 * linear scan of the lookup table to find this.
 *
 * If the pointer cannot be found in any live block, returns -1 (as this
 * is not a legal translation address).
 */
sh4addr_t xlat_get_address( unsigned char *ptr )
{
    int i, j;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( j=0; j<XLAT_LUT_PAGE_ENTRIES; j++ ) {
                void *entry = page[j];
                if( ((uintptr_t)entry) > (uintptr_t)XLAT_LUT_ENTRY_USED ) {
                    xlat_cache_block_t block = XLAT_BLOCK_FOR_LUT_ENTRY(entry);
                    if( ptr >= block->code && ptr < block->code + block->size) {
                        /* Found it */
                        return (i<<13) | (j<<1);
                    }
                }
            }
        }
    }
    return -1;
}

/**
 * Sanity check that the given pointer is at least contained in one of the
 * cache regions, and has a sane-ish size. We don't do a full region walk atm.
 */
gboolean xlat_is_code_pointer( void *p )
{
    char *region;
    uintptr_t region_size;

    xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
    if( (((char *)block) - (char *)xlat_new_cache) < XLAT_NEW_CACHE_SIZE ) {
        /* Pointer is in new cache */
        region = (char *)xlat_new_cache;
        region_size = XLAT_NEW_CACHE_SIZE;
    }
#ifdef XLAT_GENERATIONAL_CACHE
    else if( (((char *)block) - (char *)xlat_temp_cache) < XLAT_TEMP_CACHE_SIZE ) {
        /* Pointer is in temp cache */
        region = (char *)xlat_temp_cache;
        region_size = XLAT_TEMP_CACHE_SIZE;
    } else if( (((char *)block) - (char *)xlat_old_cache) < XLAT_OLD_CACHE_SIZE ) {
        /* Pointer is in old cache */
        region = (char *)xlat_old_cache;
        region_size = XLAT_OLD_CACHE_SIZE;
    }
#endif
    else {
        /* Not a valid cache pointer */
        return FALSE;
    }

    /* Make sure the whole block is in the region */
    if( (((char *)p) - region) >= region_size ||
        (((char *)(NEXT(block))) - region) >= region_size )
        return FALSE;
    return TRUE;
}

void xlat_check_integrity( )
{
    xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
#endif
}

unsigned int xlat_get_active_block_count()
{
    unsigned int count = 0;
    xlat_cache_block_t ptr = xlat_new_cache;
    while( ptr->size != 0 ) {
        if( ptr->active != 0 ) {
            count++;
        }
        ptr = NEXT(ptr);
    }
    return count;
}

unsigned int xlat_get_active_blocks( struct xlat_block_ref *blocks, unsigned int size )
{
    unsigned int count = 0;
    xlat_cache_block_t ptr = xlat_new_cache;
    while( ptr->size != 0 && count < size ) {
        if( ptr->active != 0 ) {
            blocks[count].block = ptr;
            blocks[count].pc = 0;
            count++;
        }
        ptr = NEXT(ptr);
    }
    return count;
}

static void xlat_get_block_pcs( struct xlat_block_ref *blocks, unsigned int size )
{
    unsigned i;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        void **page = xlat_lut[i];
        if( page != NULL ) {
            for( unsigned j=0; j < XLAT_LUT_PAGE_ENTRIES; j++ ) {
                void *code = XLAT_CODE_ADDR(page[j]);
                while( code != NULL ) {
                    xlat_cache_block_t ptr = XLAT_BLOCK_FOR_CODE(code);
                    sh4addr_t pc = XLAT_ADDR_FROM_ENTRY(i,j);
                    for( unsigned k=0; k<size; k++ ) {
                        if( blocks[k].block == ptr ) {
                            blocks[k].pc = pc;
                            break;
                        }
                    }
                    /* Older translations chained behind this one share the PC */
                    code = XLAT_CODE_ADDR(ptr->chain);
                }
            }
        }
    }
}

static int xlat_compare_active_field( const void *a, const void *b )
{
    const struct xlat_block_ref *ptra = (const struct xlat_block_ref *)a;
    const struct xlat_block_ref *ptrb = (const struct xlat_block_ref *)b;
    return ptrb->block->active - ptra->block->active;
}

unsigned int xlat_get_cache_blocks_by_activity( xlat_block_ref_t outblocks, size_t topN )
{
    unsigned int count = xlat_get_active_block_count();

    struct xlat_block_ref blocks[count];
    xlat_get_active_blocks(blocks, count);
    xlat_get_block_pcs(blocks, count);
    qsort(blocks, count, sizeof(struct xlat_block_ref), xlat_compare_active_field);

    if( topN > count ) {
        topN = count;
    }
    memcpy(outblocks, blocks, topN*sizeof(struct xlat_block_ref));
    return topN;
}