filename | src/sh4/xltcache.c |
changeset | 359:c588dce7ebde |
next | 366:6fb0d05152d7 |
author | nkeynes |
date | Thu Aug 23 12:33:27 2007 +0000 (15 years ago) |
permissions | -rw-r--r-- |
last change | Commit decoder generator Translator work in progress Fix mac.l, mac.w in emu core |
/**
 * $Id: xltcache.c,v 1.1 2007-08-23 12:33:27 nkeynes Exp $
 *
 * Translation cache management. This part is architecture independent.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "sh4/xltcache.h"
#include "dreamcast.h"
#include <sys/mman.h>
#include <assert.h>
#include <string.h>   /* memset/memcpy/memmove used throughout this file */

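/* Editorial overview (not part of the original file): translated code lives in
 * three mmap()ed caches -- "new" for freshly translated blocks, and "temp" and
 * "old" for blocks that survive eviction -- while a two-level lookup table
 * (xlat_lut) maps SH4 addresses to the start of their translated code.
 * Typical flow: xlat_cache_init() once at startup;
 * xlat_start_block()/xlat_extend_block()/xlat_commit_block() while translating;
 * xlat_get_code() to find code to execute; xlat_flush_page()/xlat_flush_cache()
 * to invalidate.
 */
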
#define XLAT_LUT_PAGE_BITS 12
#define XLAT_LUT_TOTAL_BITS 28
#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)

#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))

#define XLAT_LUT_ENTRY_EMPTY (void *)0
#define XLAT_LUT_ENTRY_USED  (void *)1

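/* Worked example of the LUT decomposition above (editorial note): for the SH4
 * address 0x8C0104A6,
 *   XLAT_LUT_PAGE(0x8C0104A6)  == (0x8C0104A6 >> 13) & 0xFFFF == 0x6008
 *   XLAT_LUT_ENTRY(0x8C0104A6) == (0x8C0104A6 & 0x1FFE) >> 1  == 0x253
 * so the code pointer for that address lives at xlat_lut[0x6008][0x253].
 * Each entry covers one 16-bit instruction word, so a page of 4096 entries
 * covers 8KB of SH4 address space.
 */
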
#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
#define BLOCK_FOR_CODE(code) (((xlat_cache_block_t)code)-1)
#define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
#define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)

#define MIN_BLOCK_SIZE 32
#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)

#define BLOCK_INACTIVE 0
#define BLOCK_ACTIVE 1
#define BLOCK_USED 2

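/* Block layout (editorial note, assuming the struct definition in xltcache.h
 * places the code[] array immediately after the fixed header fields, as the
 * macros above require):
 *
 *   [ header | code[0..size-1] ][ header | code ] ... [ header: active=1, size=0 ]
 *
 * NEXT(block) steps to the header that starts right after block's code, and
 * BLOCK_FOR_CODE(code) recovers the header from a code pointer by stepping
 * back one struct. The zero-size block at the end of each cache is a
 * sentinel and is never handed out.
 */
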
xlat_cache_block_t xlat_new_cache;
xlat_cache_block_t xlat_new_cache_ptr;
xlat_cache_block_t xlat_new_create_ptr;
xlat_cache_block_t xlat_temp_cache;
xlat_cache_block_t xlat_temp_cache_ptr;
xlat_cache_block_t xlat_old_cache;
xlat_cache_block_t xlat_old_cache_ptr;
static void ***xlat_lut;
static void **xlat_lut2; /* second-tier page info */

void xlat_cache_init()
{
    xlat_new_cache = mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                           MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
    xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                            MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
    xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                           MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
    xlat_new_cache_ptr = xlat_new_cache;
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_new_create_ptr = xlat_new_cache;

    xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
                     MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
    memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );

    xlat_flush_cache();
}
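
/* Illustrative sketch only, not part of the original file: mmap() reports
 * failure by returning MAP_FAILED rather than NULL, so a defensive variant of
 * the allocations above might look like the following (error handling via
 * perror/abort is an assumption, not what the surrounding code does):
 *
 *   xlat_new_cache = mmap( NULL, XLAT_NEW_CACHE_SIZE,
 *                          PROT_EXEC|PROT_READ|PROT_WRITE,
 *                          MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
 *   if( xlat_new_cache == MAP_FAILED ) {
 *       perror( "xlat_cache_init: mmap" );
 *       abort();
 *   }
 */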

/**
 * Reset the cache structure to its default state
 */
void xlat_flush_cache()
{
    xlat_cache_block_t tmp;
    int i;
    xlat_new_cache_ptr = xlat_new_cache;
    xlat_new_cache_ptr->active = 0;
    xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_new_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_temp_cache_ptr->active = 0;
    xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_temp_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_old_cache_ptr->active = 0;
    xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_old_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        if( xlat_lut[i] != NULL ) {
            memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
        }
    }
}
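
/* After xlat_flush_cache(), each of the three caches looks like this
 * (editorial sketch of the code above):
 *
 *   [ header: active=0, size = CACHE_SIZE - 2*sizeof(header) ]  <- one big free block
 *   [ ... free code space ...                                 ]
 *   [ header: active=1, size=0                                ]  <- end-of-cache sentinel
 *
 * The zero-size "active" block at the end is the sentinel that the promotion
 * loops below test for with curr->size == 0.
 */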

void xlat_flush_page( sh4addr_t address )
{
    int i;
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page == NULL ) { /* Page was never translated - nothing to flush */
        return;
    }
    for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
        if( IS_ENTRY_POINT(page[i]) ) {
            BLOCK_FOR_CODE(page[i])->active = 0;
        }
        page[i] = NULL;
    }
}

void *xlat_get_code( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page == NULL ) {
        return NULL;
    }
    return page[XLAT_LUT_ENTRY(address)];
}
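
/* Typical lookup from a dispatch loop (illustrative sketch only; the caller
 * and the translator entry point sh4_translate_basic_block() are hypothetical
 * names, not defined in this file):
 *
 *   void *code = xlat_get_code( pc );
 *   if( code == NULL || code == XLAT_LUT_ENTRY_USED ) {
 *       code = sh4_translate_basic_block( pc );
 *   }
 *   ((void (*)(void))code)();
 *
 * Note that xlat_get_code() can return XLAT_LUT_ENTRY_USED ((void *)1) for an
 * address that falls inside a translated block without being an entry point,
 * so callers must treat anything <= XLAT_LUT_ENTRY_USED as "no code".
 */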

/**
 * Cut the specified block so that it has the given size, with the remaining data
 * forming a new free block. If the free block would be less than the minimum size,
 * the cut is not performed.
 * @return the next block after the (possibly cut) block.
 */
static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
{
    if( block->size > cutsize + MIN_TOTAL_SIZE ) {
        int oldsize = block->size;
        block->size = cutsize;
        xlat_cache_block_t next = NEXT(block);
        next->active = 0;
        next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
        return next;
    } else {
        return NEXT(block);
    }
}
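
/* Worked example (editorial note): cutting a block with size == 100 down to
 * cutsize == 40, assuming 40 + MIN_TOTAL_SIZE < 100 so the cut is performed:
 *
 *   block->size becomes 40
 *   next = NEXT(block) starts at block->code + 40
 *   next->size = 100 - 40 - sizeof(struct xlat_cache_block)
 *
 * If the leftover would be smaller than MIN_TOTAL_SIZE, the block keeps its
 * original size and the block that already follows it is returned instead.
 */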

/**
 * Promote a block in temp space (or elsewhere for that matter) to old space.
 *
 * @param block The block to promote.
 */
static void xlat_promote_to_old_space( xlat_cache_block_t block )
{
    int allocation = -sizeof(struct xlat_cache_block);
    int size = block->size;
    xlat_cache_block_t curr = xlat_old_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache sentinel */
            /* Leave what we just released as free space and start again from
             * the top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = -sizeof(struct xlat_cache_block);
            start_block = curr = xlat_old_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_old_cache_ptr = xlat_cut_block( start_block, size );
    if( xlat_old_cache_ptr->size == 0 ) {
        xlat_old_cache_ptr = xlat_old_cache;
    }
}
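
/* Allocation accounting in the loop above (editorial note): to make room for a
 * block of size S, "allocation" starts at -sizeof(header) and gains
 * curr->size + sizeof(header) for each block consumed, so after consuming
 * blocks of sizes s1, s2, ..., sn it equals
 *
 *   (s1 + s2 + ... + sn) + (n-1)*sizeof(struct xlat_cache_block)
 *
 * which is exactly the code space available when those n blocks are merged
 * into one. The loop stops as soon as allocation > S; if it hits the zero-size
 * sentinel first, the space scanned so far is left as a free block and the
 * scan restarts from the top of the cache.
 */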

/**
 * Similar to the above, promotes a block to temp space.
 * TODO: Try to combine these - they're nearly identical.
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    int size = block->size;
    int allocation = -sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = xlat_temp_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        if( curr->active == BLOCK_USED ) {
            xlat_promote_to_old_space( curr );
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache sentinel */
            /* Leave what we just released as free space and start again from
             * the top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = -sizeof(struct xlat_cache_block);
            start_block = curr = xlat_temp_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_temp_cache_ptr = xlat_cut_block( start_block, size );
    if( xlat_temp_cache_ptr->size == 0 ) {
        xlat_temp_cache_ptr = xlat_temp_cache;
    }
}
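
/* Editorial sketch of the combination suggested by the TODO above; this is not
 * part of the original file and is kept out of the build with #if 0. The two
 * promotion functions differ only in which cache they scan, which write
 * pointer they update, and whether USED blocks are pushed further up, so a
 * shared helper could look roughly like this:
 */
#if 0
static void xlat_promote_to_cache( xlat_cache_block_t block,
                                   xlat_cache_block_t cache,       /* e.g. xlat_temp_cache */
                                   xlat_cache_block_t *cache_ptr,  /* e.g. &xlat_temp_cache_ptr */
                                   int promote_used_blocks )       /* nonzero for temp space */
{
    int size = block->size;
    int allocation = -sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = *cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        if( promote_used_blocks && curr->active == BLOCK_USED ) {
            xlat_promote_to_old_space( curr );
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache sentinel - wrap around */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = -sizeof(struct xlat_cache_block);
            start_block = curr = cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    *cache_ptr = xlat_cut_block( start_block, size );
    if( (*cache_ptr)->size == 0 ) {
        *cache_ptr = cache;
    }
}
#endif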

/**
 * Returns the next block in the new cache list that can be written to by the
 * translator. If the next block is active, it is evicted first.
 */
xlat_cache_block_t xlat_start_block( sh4addr_t address )
{
    if( xlat_new_cache_ptr->size == 0 ) {
        xlat_new_cache_ptr = xlat_new_cache;
    }

    if( xlat_new_cache_ptr->active ) {
        xlat_promote_to_temp_space( xlat_new_cache_ptr );
    }
    xlat_new_create_ptr = xlat_new_cache_ptr;
    xlat_new_create_ptr->active = 1;
    xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);

    /* Add the LUT entry for the block */
    if( xlat_lut[XLAT_LUT_PAGE(address)] == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] =
            mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                  MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
        memset( xlat_lut[XLAT_LUT_PAGE(address)], 0, XLAT_LUT_PAGE_SIZE );
    }

    if( IS_ENTRY_POINT(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]) ) {
        xlat_cache_block_t oldblock = BLOCK_FOR_CODE(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]);
        oldblock->active = 0;
    }

    xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)] =
        &xlat_new_create_ptr->code;
    xlat_new_create_ptr->lut_entry = xlat_lut[XLAT_LUT_PAGE(address)] + XLAT_LUT_ENTRY(address);

    return xlat_new_create_ptr;
}
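
/* Translator-side usage sketch (editorial, illustrative only; the emitter and
 * the byte counts are hypothetical names, not defined in this file):
 *
 *   xlat_cache_block_t block = xlat_start_block( pc );
 *   ... emit host code into block->code ...
 *   block = xlat_extend_block();   // call when the emitted code outgrows block->size
 *   ... keep emitting ...
 *   xlat_commit_block( bytes_emitted, sh4_bytes_covered );
 *
 * Two points visible in the code below: xlat_extend_block() may migrate the
 * block to the front of the cache, so the emitter must recompute its write
 * pointer from the returned block; and xlat_commit_block() trims the block to
 * the emitted size and marks the LUT entries covered by the translated source
 * range as USED so that later writes to those addresses can invalidate it.
 */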

xlat_cache_block_t xlat_extend_block()
{
    if( xlat_new_cache_ptr->size == 0 ) {
        /* Migrate to the front of the cache to keep it contiguous */
        xlat_new_create_ptr->active = 0;
        char *olddata = xlat_new_create_ptr->code;
        int oldsize = xlat_new_create_ptr->size;
        int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
        void **lut_entry = xlat_new_create_ptr->lut_entry;
        int allocation = -sizeof(struct xlat_cache_block);
        xlat_new_cache_ptr = xlat_new_cache;
        do {
            if( xlat_new_cache_ptr->active ) {
                xlat_promote_to_temp_space( xlat_new_cache_ptr );
            }
            allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
        } while( allocation < size );
        xlat_new_create_ptr = xlat_new_cache;
        xlat_new_create_ptr->active = 1;
        xlat_new_create_ptr->size = allocation;
        xlat_new_create_ptr->lut_entry = lut_entry;
        *lut_entry = &xlat_new_create_ptr->code;
        memmove( xlat_new_create_ptr->code, olddata, oldsize );
    } else {
        if( xlat_new_cache_ptr->active ) {
            xlat_promote_to_temp_space( xlat_new_cache_ptr );
        }
        xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
        xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
    }
    return xlat_new_create_ptr;
}

void xlat_commit_block( uint32_t destsize, uint32_t srcsize )
{
    void **ptr = xlat_new_create_ptr->lut_entry;
    void **endptr = ptr + (srcsize>>2);
    while( ptr < endptr ) {
        if( *ptr == NULL ) {
            *ptr = XLAT_LUT_ENTRY_USED;
        }
        ptr++;
    }

    xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
}

void xlat_delete_block( xlat_cache_block_t block )
{
    block->active = 0;
    *block->lut_entry = NULL;
}

void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
{
    int foundptr = 0;
    xlat_cache_block_t tail =
        (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));

    assert( tail->active == 1 );
    assert( tail->size == 0 );
    while( cache < tail ) {
        assert( cache->active >= 0 && cache->active <= 2 );
        assert( cache->size >= 0 && cache->size < size );
        if( cache == ptr ) {
            foundptr = 1;
        }
        cache = NEXT(cache);
    }
    assert( cache == tail );
    assert( foundptr == 1 );
}

void xlat_check_integrity( )
{
    xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
}
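
/* Usage suggestion (editorial): xlat_check_integrity() walks all three caches
 * and asserts that their block chains are well-formed; a debug build might
 * call it after every xlat_commit_block(), e.g.
 *
 *   xlat_commit_block( bytes_emitted, sh4_bytes_covered );
 *   xlat_check_integrity();
 *
 * to catch cache corruption close to where it happens.
 */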