filename:    src/sh4/xltcache.c
changeset:   366:6fb0d05152d7
prev:        359:c588dce7ebde
next:        376:8c7587af5a5d
author:      nkeynes
date:        Sat Sep 08 04:05:35 2007 +0000 (16 years ago)
permissions: -rw-r--r--
last change: Handle video driver init failure cleanly (fallback to headless); hookup shutdown for the GTK driver
/**
 * $Id: xltcache.c,v 1.2 2007-09-04 08:32:44 nkeynes Exp $
 *
 * Translation cache management. This part is architecture independent.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include "sh4/xltcache.h"
#include "dreamcast.h"
#include <sys/mman.h>
#include <string.h>  /* memset/memcpy/memmove */
#include <assert.h>
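
/*
 * The lookup table mapping SH4 addresses to translated code is two-level:
 * xlat_lut holds one page pointer per 8KB region of a 512MB address range,
 * and each allocated page holds one entry per 16-bit instruction word.
 * An entry is EMPTY (no translation present), USED (the address lies inside
 * a translated block but is not an entry point), or a pointer to the
 * translated code for that address.
 */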
#define XLAT_LUT_PAGE_BITS 12
#define XLAT_LUT_TOTAL_BITS 28
#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)

#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))

#define XLAT_LUT_ENTRY_EMPTY (void *)0
#define XLAT_LUT_ENTRY_USED (void *)1

#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
#define BLOCK_FOR_CODE(code) (((xlat_cache_block_t)code)-1)
#define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
#define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)

#define MIN_BLOCK_SIZE 32
#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)

#define BLOCK_INACTIVE 0
#define BLOCK_ACTIVE 1
#define BLOCK_USED 2
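
/*
 * Translated blocks live in one of three mmap'd caches, each a contiguous
 * chain of variable-sized blocks terminated by a zero-size sentinel:
 *   - the "new" cache receives freshly generated code;
 *   - blocks that are still active when the new-cache allocation pointer
 *     reaches them again are promoted to the "temp" cache;
 *   - temp blocks that have reached the BLOCK_USED state (not set within
 *     this file) are promoted to the "old" cache when temp space is reclaimed.
 * The *_cache_ptr variables track the next allocation point in each cache.
 */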
xlat_cache_block_t xlat_new_cache;
xlat_cache_block_t xlat_new_cache_ptr;
xlat_cache_block_t xlat_new_create_ptr;
xlat_cache_block_t xlat_temp_cache;
xlat_cache_block_t xlat_temp_cache_ptr;
xlat_cache_block_t xlat_old_cache;
xlat_cache_block_t xlat_old_cache_ptr;
static void ***xlat_lut;
static void **xlat_lut2; /* second-tier page info */
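
/**
 * Allocate the three translation caches (readable, writable and executable)
 * and the first-level lookup table, then reset everything to the empty state.
 */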
void xlat_cache_init()
{
    xlat_new_cache = mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                           MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
    xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                            MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
    xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                           MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
    xlat_new_cache_ptr = xlat_new_cache;
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_new_create_ptr = xlat_new_cache;

    xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
                     MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );

    xlat_flush_cache();
}
/**
 * Reset the cache structure to its default state
 */
void xlat_flush_cache()
{
    xlat_cache_block_t tmp;
    int i;
    xlat_new_cache_ptr = xlat_new_cache;
    xlat_new_cache_ptr->active = 0;
    xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_new_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_temp_cache_ptr->active = 0;
    xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_temp_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_old_cache_ptr->active = 0;
    xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_old_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        if( xlat_lut[i] != NULL ) {
            memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
        }
    }
}
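
/**
 * Invalidate all translations on the 8KB page containing the given address:
 * every block with an entry point on the page is marked inactive, and all
 * LUT entries for the page are cleared.
 */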
void xlat_flush_page( sh4addr_t address )
{
    int i;
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page == NULL ) {
        return; /* Nothing has ever been translated on this page */
    }
    for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
        if( IS_ENTRY_POINT(page[i]) ) {
            BLOCK_FOR_CODE(page[i])->active = 0;
        }
        page[i] = NULL;
    }
}
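
/**
 * Look up the LUT entry for the given SH4 address. Returns NULL if no
 * translation exists, XLAT_LUT_ENTRY_USED if the address lies inside a
 * translated block without being an entry point, or a pointer to the
 * translated code otherwise.
 */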
void *xlat_get_code( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page == NULL ) {
        return NULL;
    }
    return page[XLAT_LUT_ENTRY(address)];
}
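
/**
 * Return the code size of the translated block whose code begins at the
 * given pointer (the block header immediately precedes the code).
 */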
uint32_t xlat_get_block_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    return xlt->size;
}
/**
 * Cut the specified block so that it has the given size, with the remaining data
 * forming a new free block. If the free block would be less than the minimum size,
 * the cut is not performed.
 * @return the next block after the (possibly cut) block.
 */
static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
{
    if( block->size > cutsize + MIN_TOTAL_SIZE ) {
        int oldsize = block->size;
        block->size = cutsize;
        xlat_cache_block_t next = NEXT(block);
        next->active = 0;
        next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
        return next;
    } else {
        return NEXT(block);
    }
}
/**
 * Promote a block from temp space (or any other cache, for that matter) into
 * old space, reclaiming space from existing old blocks as needed to make room.
 *
 * @param block the block to promote.
 */
static void xlat_promote_to_old_space( xlat_cache_block_t block )
{
    int allocation = -sizeof(struct xlat_cache_block);
    int size = block->size;
    xlat_cache_block_t curr = xlat_old_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = -sizeof(struct xlat_cache_block);
            start_block = curr = xlat_old_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_old_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_old_cache_ptr->size == 0 ) {
        xlat_old_cache_ptr = xlat_old_cache;
    }
}
/**
 * Similar to the above, promotes a block to temp space, first pushing any
 * BLOCK_USED blocks it displaces into old space.
 * TODO: Try to combine these - they're nearly identical
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    int size = block->size;
    int allocation = -sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = xlat_temp_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        if( curr->active == BLOCK_USED ) {
            xlat_promote_to_old_space( curr );
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = -sizeof(struct xlat_cache_block);
            start_block = curr = xlat_temp_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_temp_cache_ptr->size == 0 ) {
        xlat_temp_cache_ptr = xlat_temp_cache;
    }
}
/**
 * Returns the next block in the new cache list that can be written to by the
 * translator. If the next block is still active, it is promoted to temp space
 * first to make room.
 */
xlat_cache_block_t xlat_start_block( sh4addr_t address )
{
    if( xlat_new_cache_ptr->size == 0 ) {
        xlat_new_cache_ptr = xlat_new_cache;
    }

    if( xlat_new_cache_ptr->active ) {
        xlat_promote_to_temp_space( xlat_new_cache_ptr );
    }
    xlat_new_create_ptr = xlat_new_cache_ptr;
    xlat_new_create_ptr->active = 1;
    xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);

    /* Add the LUT entry for the block */
    if( xlat_lut[XLAT_LUT_PAGE(address)] == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] =
            mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                  MAP_PRIVATE|MAP_ANONYMOUS, -1, 0 );
        memset( xlat_lut[XLAT_LUT_PAGE(address)], 0, XLAT_LUT_PAGE_SIZE );
    }

    if( IS_ENTRY_POINT(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]) ) {
        xlat_cache_block_t oldblock = BLOCK_FOR_CODE(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]);
        oldblock->active = 0;
    }

    xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)] =
        &xlat_new_create_ptr->code;
    xlat_new_create_ptr->lut_entry = xlat_lut[XLAT_LUT_PAGE(address)] + XLAT_LUT_ENTRY(address);

    return xlat_new_create_ptr;
}
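
/**
 * Enlarge the block currently under construction, promoting any following
 * block that is still active to temp space and absorbing its space. If the
 * end of the new cache is reached, the block is moved back to the start of
 * the cache so that it remains contiguous.
 * @return the (possibly relocated) block being constructed.
 */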
xlat_cache_block_t xlat_extend_block()
{
    if( xlat_new_cache_ptr->size == 0 ) {
        /* Migrate to the front of the cache to keep it contiguous */
        xlat_new_create_ptr->active = 0;
        char *olddata = xlat_new_create_ptr->code;
        int oldsize = xlat_new_create_ptr->size;
        int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
        void **lut_entry = xlat_new_create_ptr->lut_entry;
        int allocation = -sizeof(struct xlat_cache_block);
        xlat_new_cache_ptr = xlat_new_cache;
        do {
            if( xlat_new_cache_ptr->active ) {
                xlat_promote_to_temp_space( xlat_new_cache_ptr );
            }
            allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
        } while( allocation < size );
        xlat_new_create_ptr = xlat_new_cache;
        xlat_new_create_ptr->active = 1;
        xlat_new_create_ptr->size = allocation;
        xlat_new_create_ptr->lut_entry = lut_entry;
        *lut_entry = &xlat_new_create_ptr->code;
        memmove( xlat_new_create_ptr->code, olddata, oldsize );
    } else {
        if( xlat_new_cache_ptr->active ) {
            xlat_promote_to_temp_space( xlat_new_cache_ptr );
        }
        xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
        xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
    }
    return xlat_new_create_ptr;
}
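
/**
 * Mark the block currently under construction as complete: flag the LUT
 * entries covered by the translated source range as in use, then trim the
 * block to its final code size, returning any excess space to the free pool.
 * @param destsize size in bytes of the generated code.
 * @param srcsize size in bytes of the SH4 source that was translated.
 */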
void xlat_commit_block( uint32_t destsize, uint32_t srcsize )
{
    void **ptr = xlat_new_create_ptr->lut_entry;
    void **endptr = ptr + (srcsize>>2);
    while( ptr < endptr ) {
        if( *ptr == NULL ) {
            *ptr = XLAT_LUT_ENTRY_USED;
        }
        ptr++;
    }

    xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
}
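
/**
 * Remove a block from the cache: mark it inactive and clear its LUT entry so
 * that xlat_get_code will no longer return it. The space itself is reclaimed
 * lazily when the owning cache wraps around.
 */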
void xlat_delete_block( xlat_cache_block_t block )
{
    block->active = 0;
    *block->lut_entry = NULL;
}
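
/**
 * Walk the given cache from its first block to the end-of-cache sentinel,
 * asserting that every block header is sane and that the supplied allocation
 * pointer actually lies within the chain. Intended for debugging.
 */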
void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
{
    int foundptr = 0;
    xlat_cache_block_t tail =
        (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));

    assert( tail->active == 1 );
    assert( tail->size == 0 );
    while( cache < tail ) {
        assert( cache->active >= 0 && cache->active <= 2 );
        assert( cache->size >= 0 && cache->size < size );
        if( cache == ptr ) {
            foundptr = 1;
        }
        cache = NEXT(cache);
    }
    assert( cache == tail );
    assert( foundptr == 1 );
}
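
/**
 * Run the integrity check over all three caches.
 */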
void xlat_check_integrity( )
{
    xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
}
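
/*
 * Illustrative only (not part of the original source): a sketch of how a
 * translator front-end might drive this cache. emit_block() is a hypothetical
 * code generator that writes native code into the supplied buffer, reporting
 * the number of bytes emitted and the number of SH4 bytes consumed, and
 * failing when the buffer is too small.
 *
 *     xlat_cache_block_t block = xlat_start_block( pc );
 *     uint32_t destsize, srcsize;
 *     while( !emit_block( block->code, block->size, &destsize, &srcsize ) ) {
 *         block = xlat_extend_block();   // grow (and possibly relocate) the block
 *     }
 *     xlat_commit_block( destsize, srcsize );
 *     void (*code)() = xlat_get_code( pc );  // entry point for pc
 *     code();
 */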