lxdream.org :: lxdream/src/xlat/xltcache.c
filename     src/xlat/xltcache.c
changeset    1149:da6124fceec6
prev         1126:1f2c7cdee73e
next         1175:712c418cad83
author       nkeynes
date         Wed Nov 10 08:37:42 2010 +1000
permissions  -rw-r--r--
last change  Add chain pointer to the xlat cache, so that we can maintain multiple
             blocks for the same address. This prevents thrashing in cases where we
             would otherwise keep retranslating the same blocks over and over again
             due to varying xlat_sh4_mode values.

/**
 * $Id$
 *
 * Translation cache management. This part is architecture independent.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <assert.h>
#include <string.h>   /* memset/memcpy/memmove */

#include "dreamcast.h"
#include "sh4/sh4core.h"
#include "xlat/xltcache.h"
#include "x86dasm/x86dasm.h"
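
/* Translated code is located via a two-level lookup table: bits 13..28 of the
 * SH4 address select a lazily-allocated LUT page, and bits 1..12 select an
 * entry within it, one per 16-bit instruction word.
 */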
#define XLAT_LUT_PAGE_BITS 12
#define XLAT_LUT_TOTAL_BITS 28
#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)

#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))
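
/* LUT entry sentinels: EMPTY means the word has never been translated; USED
 * means it lies within a translated block but is not an entry point (see
 * xlat_commit_block). Anything greater is a real code pointer.
 */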
#define XLAT_LUT_ENTRY_EMPTY (void *)0
#define XLAT_LUT_ENTRY_USED  (void *)1
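
/* Blocks are laid out head-to-tail within a cache region: each struct
 * xlat_cache_block header is followed immediately by ->size bytes of code,
 * so the next header begins right after the current block's code.
 */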
#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
#define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
#define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)

#define MIN_BLOCK_SIZE 32
#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)
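
/* Block states: INACTIVE blocks are free space and ACTIVE blocks hold current
 * translations. USED, set outside this file, marks active blocks that the
 * generational cache promotes rather than discards on eviction.
 */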
#define BLOCK_INACTIVE 0
#define BLOCK_ACTIVE 1
#define BLOCK_USED 2
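
/* The "new" cache is a circular arena: xlat_new_cache_ptr is the allocation
 * cursor, while xlat_new_create_ptr points at the block currently being
 * written by the translator (see xlat_start_block / xlat_extend_block).
 */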
xlat_cache_block_t xlat_new_cache;
xlat_cache_block_t xlat_new_cache_ptr;
xlat_cache_block_t xlat_new_create_ptr;

#ifdef XLAT_GENERATIONAL_CACHE
xlat_cache_block_t xlat_temp_cache;
xlat_cache_block_t xlat_temp_cache_ptr;
xlat_cache_block_t xlat_old_cache;
xlat_cache_block_t xlat_old_cache_ptr;
#endif

static void **xlat_lut[XLAT_LUT_PAGES];
static gboolean xlat_initialized = FALSE;
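
/**
 * Allocate the code cache(s) with mmap (PROT_EXEC so that generated code is
 * executable), clear the first-level LUT, and reset the cache structures.
 * Safe to call more than once; allocation only happens on the first call.
 */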
void xlat_cache_init(void)
{
    if( !xlat_initialized ) {
        xlat_initialized = TRUE;
        xlat_new_cache = (xlat_cache_block_t)mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_new_cache_ptr = xlat_new_cache;
        xlat_new_create_ptr = xlat_new_cache;
#ifdef XLAT_GENERATIONAL_CACHE
        xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_temp_cache_ptr = xlat_temp_cache;
        xlat_old_cache_ptr = xlat_old_cache;
#endif
//        xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
//                MAP_PRIVATE|MAP_ANON, -1, 0);
        memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
    }
    xlat_flush_cache();
}

/**
 * Reset the cache structure to its default state
 */
void xlat_flush_cache()
{
    xlat_cache_block_t tmp;
    int i;
    /* Each cache becomes a single free block spanning the whole region,
     * terminated by a zero-size sentinel block marked active. */
    xlat_new_cache_ptr = xlat_new_cache;
    xlat_new_cache_ptr->active = 0;
    xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_new_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_temp_cache_ptr->active = 0;
    xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_temp_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_old_cache_ptr->active = 0;
    xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_old_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
#endif
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        if( xlat_lut[i] != NULL ) {
            memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
        }
    }
}
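
/**
 * Deactivate every block reachable from the given LUT page, following each
 * entry's chain of same-address translations, and clear all entries.
 */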
static void xlat_flush_page_by_lut( void **page )
{
    int i;
    for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
        if( IS_ENTRY_POINT(page[i]) ) {
            void *p = page[i];
            do {
                xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
                block->active = 0;
                p = block->chain;
            } while( p != NULL );
        }
        page[i] = NULL;
    }
}
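
/**
 * Invalidate any translation covering the 16-bit word at addr. The whole
 * containing LUT page is flushed, as block extents aren't tracked per word.
 */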
void FASTCALL xlat_invalidate_word( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( page[entry] != NULL ) {
            xlat_flush_page_by_lut(page);
        }
    }
}
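
/**
 * As above, but for a 32-bit store: both covered LUT entries are tested with
 * a single 64-bit load (which assumes 32-bit pointers).
 */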
void FASTCALL xlat_invalidate_long( sh4addr_t addr )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
    if( page != NULL ) {
        int entry = XLAT_LUT_ENTRY(addr);
        if( *(uint64_t *)&page[entry] != 0 ) {
            xlat_flush_page_by_lut(page);
        }
    }
}
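
/**
 * Invalidate all translations overlapping the given address range, flushing
 * each LUT page that contains at least one live entry in the range.
 */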
void FASTCALL xlat_invalidate_block( sh4addr_t address, size_t size )
{
    int i;
    int entry_count = size >> 1; // words
    uint32_t page_no = XLAT_LUT_PAGE(address);
    int entry = XLAT_LUT_ENTRY(address);
    do {
        void **page = xlat_lut[page_no];
        int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
        if( entry_count < page_entries ) {
            page_entries = entry_count;
        }
        if( page != NULL ) {
            if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
                /* Overwriting the entire page anyway */
                xlat_flush_page_by_lut(page);
            } else {
                for( i=entry; i<entry+page_entries; i++ ) {
                    if( page[i] != NULL ) {
                        xlat_flush_page_by_lut(page);
                        break;
                    }
                }
            }
        }
        page_no ++;
        entry_count -= page_entries;
        entry = 0;
    } while( entry_count > 0 );
}
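
/**
 * Invalidate every translation in the LUT page containing the given address.
 */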
void FASTCALL xlat_flush_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        xlat_flush_page_by_lut(page);
    }
}
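
/**
 * Look up the translated code for the given SH4 address, or NULL if there is
 * none. The low two bits of the entry are flag/sentinel bits and are masked
 * off, so a USED-only entry also yields NULL.
 */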
void * FASTCALL xlat_get_code( sh4addr_t address )
{
    void *result = NULL;
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        result = (void *)(((uintptr_t)(page[XLAT_LUT_ENTRY(address)])) & (~((uintptr_t)0x03)));
    }
    return result;
}
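
/**
 * Locate the recovery record covering the given native pc within a translated
 * block: the last record whose xlat_offset is below the pc's offset from the
 * start of the code (records are assumed ordered by xlat_offset). Returns
 * NULL if code is NULL.
 */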
xlat_recovery_record_t xlat_get_pre_recovery( void *code, void *native_pc )
{
    if( code != NULL ) {
        uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
        xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(code);
        uint32_t count = block->recover_table_size;
        xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
        uint32_t posn;
        for( posn = 1; posn < count; posn++ ) {
            if( records[posn].xlat_offset >= pc_offset ) {
                return &records[posn-1];
            }
        }
        return &records[count-1];
    }
    return NULL;
}
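
/**
 * Return a pointer to the LUT slot for the given address, allocating the
 * second-level page on demand.
 */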
void ** FASTCALL xlat_get_lut_entry( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];

    /* Add the LUT entry for the block */
    if( page == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] = page =
            (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( page, 0, XLAT_LUT_PAGE_SIZE );
    }

    return &page[XLAT_LUT_ENTRY(address)];
}
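
/**
 * Total size in bytes of the given block (code plus recovery table).
 */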
uint32_t FASTCALL xlat_get_block_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    return xlt->size;
}
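
/**
 * Size of the code portion only: if the block carries a recovery table, the
 * code ends where the table begins (a zero offset means no table).
 */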
uint32_t FASTCALL xlat_get_code_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    if( xlt->recover_table_offset == 0 ) {
        return xlt->size;
    } else {
        return xlt->recover_table_offset;
    }
}

/**
 * Cut the specified block so that it has the given size, with the remaining data
 * forming a new free block. If the free block would be less than the minimum size,
 * the cut is not performed.
 * @return the next block after the (possibly cut) block.
 */
static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
{
    cutsize = (cutsize + 3) & 0xFFFFFFFC; // force word alignment
    assert( cutsize <= block->size );
    if( block->size > cutsize + MIN_TOTAL_SIZE ) {
        int oldsize = block->size;
        block->size = cutsize;
        xlat_cache_block_t next = NEXT(block);
        next->active = 0;
        next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
        return next;
    } else {
        return NEXT(block);
    }
}

#ifdef XLAT_GENERATIONAL_CACHE
/**
 * Promote a block in temp space (or elsewhere for that matter) to old space.
 *
 * @param block to promote.
 */
static void xlat_promote_to_old_space( xlat_cache_block_t block )
{
    /* allocation counts the usable bytes gained by merging the scanned
     * blocks into one (their headers are absorbed, except the first) */
    int allocation = (int)-sizeof(struct xlat_cache_block);
    int size = block->size;
    xlat_cache_block_t curr = xlat_old_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_old_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_old_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_old_cache_ptr->size == 0 ) {
        xlat_old_cache_ptr = xlat_old_cache;
    }
}

/**
 * As with the above function, promotes a block to temp space.
 * TODO: Try to combine these - they're nearly identical
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    int size = block->size;
    int allocation = (int)-sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = xlat_temp_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        if( curr->active == BLOCK_USED ) {
            xlat_promote_to_old_space( curr );
        } else if( curr->active == BLOCK_ACTIVE ) {
            /* Active but not used - release the block, keeping only the low
             * flag bits so the LUT entry reverts to "used, no entry point" */
            *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03);
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = (int)-sizeof(struct xlat_cache_block);
            start_block = curr = xlat_temp_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->chain = block->chain;
    start_block->fpscr_mask = block->fpscr_mask;
    start_block->fpscr = block->fpscr;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_temp_cache_ptr->size == 0 ) {
        xlat_temp_cache_ptr = xlat_temp_cache;
    }
}
#else
/* Without the generational cache, an evicted block is simply unlinked */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    *block->lut_entry = 0;
}
#endif

/**
 * Returns the next block in the new cache list that can be written to by the
 * translator. If the next block is active, it is evicted first.
 */
xlat_cache_block_t xlat_start_block( sh4addr_t address )
{
    if( xlat_new_cache_ptr->size == 0 ) {
        xlat_new_cache_ptr = xlat_new_cache;
    }

    if( xlat_new_cache_ptr->active ) {
        xlat_promote_to_temp_space( xlat_new_cache_ptr );
    }
    xlat_new_create_ptr = xlat_new_cache_ptr;
    xlat_new_create_ptr->active = 1;
    xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);

    /* Add the LUT entry for the block */
    if( xlat_lut[XLAT_LUT_PAGE(address)] == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] =
            (void **)mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( xlat_lut[XLAT_LUT_PAGE(address)], 0, XLAT_LUT_PAGE_SIZE );
    }

    /* Chain any existing translation for this address onto the new block, so
     * that multiple translations (e.g. for different xlat_sh4_mode values)
     * can coexist for the same address */
    if( IS_ENTRY_POINT(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]) ) {
        void *p = xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)];
        xlat_cache_block_t oldblock = XLAT_BLOCK_FOR_CODE(p);
        assert( oldblock->active );
        xlat_new_create_ptr->chain = p;
    } else {
        xlat_new_create_ptr->chain = NULL;
    }

    xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)] =
        &xlat_new_create_ptr->code;
    xlat_new_create_ptr->lut_entry = xlat_lut[XLAT_LUT_PAGE(address)] + XLAT_LUT_ENTRY(address);

    return xlat_new_create_ptr;
}
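
/**
 * Grow the block currently under construction to at least newSize bytes,
 * absorbing following blocks (evicting active ones as needed). On reaching
 * the cache's end sentinel, the partial block is moved back to the front of
 * the cache so that its code stays contiguous.
 */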
xlat_cache_block_t xlat_extend_block( uint32_t newSize )
{
    while( xlat_new_create_ptr->size < newSize ) {
        if( xlat_new_cache_ptr->size == 0 ) {
            /* Migrate to the front of the cache to keep it contiguous */
            xlat_new_create_ptr->active = 0;
            sh4ptr_t olddata = xlat_new_create_ptr->code;
            int oldsize = xlat_new_create_ptr->size;
            int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
            void **lut_entry = xlat_new_create_ptr->lut_entry;
            void *chain = xlat_new_create_ptr->chain;
            int allocation = (int)-sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = xlat_new_cache;
            do {
                if( xlat_new_cache_ptr->active ) {
                    xlat_promote_to_temp_space( xlat_new_cache_ptr );
                }
                allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
                xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
            } while( allocation < size );
            xlat_new_create_ptr = xlat_new_cache;
            xlat_new_create_ptr->active = 1;
            xlat_new_create_ptr->size = allocation;
            xlat_new_create_ptr->lut_entry = lut_entry;
            xlat_new_create_ptr->chain = chain;
            *lut_entry = &xlat_new_create_ptr->code;
            memmove( xlat_new_create_ptr->code, olddata, oldsize );
        } else {
            if( xlat_new_cache_ptr->active ) {
                xlat_promote_to_temp_space( xlat_new_cache_ptr );
            }
            xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
        }
    }
    return xlat_new_create_ptr;
}
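
/**
 * Finalise the block under construction: trim it to destsize bytes, and mark
 * every LUT entry covered by the srcsize-byte source range as at least USED,
 * so that writes within the range invalidate the block.
 */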
void xlat_commit_block( uint32_t destsize, uint32_t srcsize )
{
    void **ptr = xlat_new_create_ptr->lut_entry;
    void **endptr = ptr + (srcsize>>1);
    while( ptr < endptr ) {
        if( *ptr == NULL ) {
            *ptr = XLAT_LUT_ENTRY_USED;
        }
        ptr++;
    }

    xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
}
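
/**
 * Deactivate a single block and remove its LUT entry.
 */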
void xlat_delete_block( xlat_cache_block_t block )
{
    block->active = 0;
    *block->lut_entry = NULL;
}
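
/**
 * Walk a cache region from its start to its end sentinel, asserting that
 * every block header is sane and that the given allocation pointer lies
 * within the region.
 */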
void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
{
    int foundptr = 0;
    xlat_cache_block_t tail =
        (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));

    assert( tail->active == 1 );
    assert( tail->size == 0 );
    while( cache < tail ) {
        assert( cache->active >= 0 && cache->active <= 2 );
        assert( cache->size >= 0 && cache->size < size );
        if( cache == ptr ) {
            foundptr = 1;
        }
        cache = NEXT(cache);
    }
    assert( cache == tail );
    assert( foundptr == 1 || tail == ptr );
}

/**
 * Sanity check that the given pointer is at least contained in one of the
 * cache regions, and has a sane-ish size. We don't do a full region walk atm.
 */
gboolean xlat_is_code_pointer( void *p )
{
    char *region;
    uintptr_t region_size;

    xlat_cache_block_t block = XLAT_BLOCK_FOR_CODE(p);
    if( (((char *)block) - (char *)xlat_new_cache) < XLAT_NEW_CACHE_SIZE ) {
        /* Pointer is in new cache */
        region = (char *)xlat_new_cache;
        region_size = XLAT_NEW_CACHE_SIZE;
    }
#ifdef XLAT_GENERATIONAL_CACHE
    else if( (((char *)block) - (char *)xlat_temp_cache) < XLAT_TEMP_CACHE_SIZE ) {
        /* Pointer is in temp cache */
        region = (char *)xlat_temp_cache;
        region_size = XLAT_TEMP_CACHE_SIZE;
    } else if( (((char *)block) - (char *)xlat_old_cache) < XLAT_OLD_CACHE_SIZE ) {
        /* Pointer is in old cache */
        region = (char *)xlat_old_cache;
        region_size = XLAT_OLD_CACHE_SIZE;
    }
#endif
    else {
        /* Not a valid cache pointer */
        return FALSE;
    }

    /* Make sure the whole block is in the region */
    if( (((char *)p) - region) >= region_size ||
        (((char *)(NEXT(block))) - region) >= region_size )
        return FALSE;
    return TRUE;
}
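
/**
 * Run the block-level integrity check over each cache region.
 */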
void xlat_check_integrity( )
{
    xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
#ifdef XLAT_GENERATIONAL_CACHE
    xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
#endif
}