filename     src/sh4/xltcache.c
changeset    617:476a717a54f3
prev         596:dfc0c93d882e
next         736:a02d1475ccfd
author       nkeynes
date         Thu May 15 10:22:39 2008 +0000
permissions  -rw-r--r--
last change  Permanently add SH4 instruction statistics tracking (enabled with --enable-sh4stats)
/**
 * $Id$
 *
 * Translation cache management. This part is architecture independent.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>   /* memset/memcpy/memmove (may also be pulled in via dreamcast.h) */
#include <stdio.h>    /* FILE, fprintf */
#include <assert.h>

#include "dreamcast.h"
#include "sh4/sh4core.h"
#include "sh4/xltcache.h"
#include "x86dasm/x86dasm.h"

#define XLAT_LUT_PAGE_BITS 12
#define XLAT_LUT_TOTAL_BITS 28
#define XLAT_LUT_PAGE(addr) (((addr)>>13) & 0xFFFF)
#define XLAT_LUT_ENTRY(addr) (((addr)&0x1FFE) >> 1)

#define XLAT_LUT_PAGES (1<<(XLAT_LUT_TOTAL_BITS-XLAT_LUT_PAGE_BITS))
#define XLAT_LUT_PAGE_ENTRIES (1<<XLAT_LUT_PAGE_BITS)
#define XLAT_LUT_PAGE_SIZE (XLAT_LUT_PAGE_ENTRIES * sizeof(void *))

#define XLAT_LUT_ENTRY_EMPTY (void *)0
#define XLAT_LUT_ENTRY_USED  (void *)1

#define NEXT(block) ( (xlat_cache_block_t)&((block)->code[(block)->size]))
#define BLOCK_FOR_CODE(code) (((xlat_cache_block_t)code)-1)
#define IS_ENTRY_POINT(ent) (ent > XLAT_LUT_ENTRY_USED)
#define IS_ENTRY_USED(ent) (ent != XLAT_LUT_ENTRY_EMPTY)

#define MIN_BLOCK_SIZE 32
#define MIN_TOTAL_SIZE (sizeof(struct xlat_cache_block)+MIN_BLOCK_SIZE)

#define BLOCK_INACTIVE 0
#define BLOCK_ACTIVE 1
#define BLOCK_USED 2

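/*
 * Address mapping example (derived from the macros above): the LUT is indexed
 * by a page number and a word entry within that page, so each page covers 8KB
 * of SH4 address space at 2-byte (instruction word) granularity. For instance,
 * address 0x8C0100A6 maps to LUT page 0x6008, entry 0x53.
 */
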
xlat_cache_block_t xlat_new_cache;
xlat_cache_block_t xlat_new_cache_ptr;
xlat_cache_block_t xlat_new_create_ptr;
xlat_cache_block_t xlat_temp_cache;
xlat_cache_block_t xlat_temp_cache_ptr;
xlat_cache_block_t xlat_old_cache;
xlat_cache_block_t xlat_old_cache_ptr;
static void ***xlat_lut;
static gboolean xlat_initialized = FALSE;

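/**
 * Allocate the three translation cache regions (new, temp and old) and the
 * first-level lookup table, then reset them to the empty state. Subsequent
 * calls skip the allocation and just flush the cache.
 */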
void xlat_cache_init(void)
{
    if( !xlat_initialized ) {
        xlat_initialized = TRUE;
        xlat_new_cache = mmap( NULL, XLAT_NEW_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                               MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_temp_cache = mmap( NULL, XLAT_TEMP_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                                MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_old_cache = mmap( NULL, XLAT_OLD_CACHE_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE,
                               MAP_PRIVATE|MAP_ANON, -1, 0 );
        xlat_new_cache_ptr = xlat_new_cache;
        xlat_temp_cache_ptr = xlat_temp_cache;
        xlat_old_cache_ptr = xlat_old_cache;
        xlat_new_create_ptr = xlat_new_cache;

        xlat_lut = mmap( NULL, XLAT_LUT_PAGES*sizeof(void *), PROT_READ|PROT_WRITE,
                         MAP_PRIVATE|MAP_ANON, -1, 0);
        memset( xlat_lut, 0, XLAT_LUT_PAGES*sizeof(void *) );
    }
    xlat_flush_cache();
}

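/**
 * Print the size of the free block at each cache's current allocation pointer
 * to the given stream.
 */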
void xlat_print_free( FILE *out )
{
    fprintf( out, "New space: %d\nTemp space: %d\nOld space: %d\n",
             xlat_new_cache_ptr->size, xlat_temp_cache_ptr->size, xlat_old_cache_ptr->size );
}

/**
 * Reset the cache structure to its default state
 */
void xlat_flush_cache()
{
    xlat_cache_block_t tmp;
    int i;
    xlat_new_cache_ptr = xlat_new_cache;
    xlat_new_cache_ptr->active = 0;
    xlat_new_cache_ptr->size = XLAT_NEW_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_new_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_temp_cache_ptr = xlat_temp_cache;
    xlat_temp_cache_ptr->active = 0;
    xlat_temp_cache_ptr->size = XLAT_TEMP_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_temp_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    xlat_old_cache_ptr = xlat_old_cache;
    xlat_old_cache_ptr->active = 0;
    xlat_old_cache_ptr->size = XLAT_OLD_CACHE_SIZE - 2*sizeof(struct xlat_cache_block);
    tmp = NEXT(xlat_old_cache_ptr);
    tmp->active = 1;
    tmp->size = 0;
    for( i=0; i<XLAT_LUT_PAGES; i++ ) {
        if( xlat_lut[i] != NULL ) {
            memset( xlat_lut[i], 0, XLAT_LUT_PAGE_SIZE );
        }
    }
}

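/**
 * Mark every block with an entry point in the given LUT page as inactive,
 * and clear all of the page's entries.
 */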
static void xlat_flush_page_by_lut( void **page )
{
    int i;
    for( i=0; i<XLAT_LUT_PAGE_ENTRIES; i++ ) {
        if( IS_ENTRY_POINT(page[i]) ) {
            BLOCK_FOR_CODE(page[i])->active = 0;
        }
        page[i] = NULL;
    }
}

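/**
 * Invalidate any translated code derived from the 16-bit word at addr.
 * Note that the entire containing LUT page is flushed, not just the single
 * entry.
 */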
void xlat_invalidate_word( sh4addr_t addr )
{
    if( xlat_lut ) {
        void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
        if( page != NULL ) {
            int entry = XLAT_LUT_ENTRY(addr);
            if( page[entry] != NULL ) {
                xlat_flush_page_by_lut(page);
            }
        }
    }
}

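/**
 * Invalidate any translated code derived from the 32-bit longword at addr
 * (either of the two word entries it covers); flushes the whole containing
 * LUT page, as above.
 */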
void xlat_invalidate_long( sh4addr_t addr )
{
    if( xlat_lut ) {
        void **page = xlat_lut[XLAT_LUT_PAGE(addr)];
        if( page != NULL ) {
            int entry = XLAT_LUT_ENTRY(addr);
            if( page[entry] != NULL || page[entry+1] != NULL ) {
                xlat_flush_page_by_lut(page);
            }
        }
    }
}

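/**
 * Invalidate any translated code derived from the given address range.
 * Each LUT page that contains at least one entry point in the range is
 * flushed in its entirety.
 */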
void xlat_invalidate_block( sh4addr_t address, size_t size )
{
    int i;
    int entry_count = size >> 1; // words
    uint32_t page_no = XLAT_LUT_PAGE(address);
    int entry = XLAT_LUT_ENTRY(address);
    if( xlat_lut ) {
        do {
            void **page = xlat_lut[page_no];
            int page_entries = XLAT_LUT_PAGE_ENTRIES - entry;
            if( entry_count < page_entries ) {
                page_entries = entry_count;
            }
            if( page != NULL ) {
                if( page_entries == XLAT_LUT_PAGE_ENTRIES ) {
                    /* Overwriting the entire page anyway */
                    xlat_flush_page_by_lut(page);
                } else {
                    for( i=entry; i<entry+page_entries; i++ ) {
                        if( page[i] != NULL ) {
                            xlat_flush_page_by_lut(page);
                            break;
                        }
                    }
                }
            }
            page_no ++;
            entry_count -= page_entries; /* advance by the entries examined in this page */
            entry = 0;
        } while( entry_count > 0 );
    }
}

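/**
 * Flush all translations in the LUT page containing the given address, if
 * that page has been allocated.
 */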
void xlat_flush_page( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        xlat_flush_page_by_lut(page);
    }
}

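/**
 * Look up the translated entry point for an SH4 address.
 * @return a pointer to the native code, or NULL if no translation exists
 * (the low flag bits of the LUT entry are masked off).
 */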
void *xlat_get_code( sh4addr_t address )
{
    void *result = NULL;
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];
    if( page != NULL ) {
        result = (void *)(((uintptr_t)(page[XLAT_LUT_ENTRY(address)])) & (~((uintptr_t)0x03)));
    }
    return result;
}

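/**
 * Locate the recovery record for a native PC within the given translated
 * block, assuming the recovery table is ordered by xlat_offset: with
 * recover_after set, the first record at or after the PC is returned
 * (or NULL if there is none); otherwise the last record before it.
 */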
xlat_recovery_record_t xlat_get_recovery( void *code, void *native_pc, gboolean recover_after )
{
    if( code != NULL ) {
        uintptr_t pc_offset = ((uint8_t *)native_pc) - ((uint8_t *)code);
        xlat_cache_block_t block = BLOCK_FOR_CODE(code);
        uint32_t count = block->recover_table_size;
        xlat_recovery_record_t records = (xlat_recovery_record_t)(&block->code[block->recover_table_offset]);
        uint32_t posn;
        if( recover_after ) {
            if( records[count-1].xlat_offset < pc_offset ) {
                return NULL;
            }
            for( posn=count-1; posn > 0; posn-- ) {
                if( records[posn-1].xlat_offset < pc_offset ) {
                    return &records[posn];
                }
            }
            return &records[0]; // shouldn't happen
        } else {
            for( posn = 1; posn < count; posn++ ) {
                if( records[posn].xlat_offset >= pc_offset ) {
                    return &records[posn-1];
                }
            }
            return &records[count-1];
        }
    }
    return NULL;
}

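/**
 * Return the address of the LUT entry for the given SH4 address, allocating
 * and zeroing the containing LUT page if it does not exist yet.
 */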
void **xlat_get_lut_entry( sh4addr_t address )
{
    void **page = xlat_lut[XLAT_LUT_PAGE(address)];

    /* Add the LUT entry for the block */
    if( page == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] = page =
            mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                  MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( page, 0, XLAT_LUT_PAGE_SIZE );
    }

    return &page[XLAT_LUT_ENTRY(address)];
}

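/**
 * @return the total data size of the translated block owning the given code
 * pointer, including any recovery table appended after the code.
 */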
uint32_t xlat_get_block_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    return xlt->size;
}

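/**
 * @return the size of the executable code in the block, excluding the
 * recovery table (when recover_table_offset is nonzero).
 */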
uint32_t xlat_get_code_size( void *block )
{
    xlat_cache_block_t xlt = (xlat_cache_block_t)(((char *)block)-sizeof(struct xlat_cache_block));
    if( xlt->recover_table_offset == 0 ) {
        return xlt->size;
    } else {
        return xlt->recover_table_offset;
    }
}

/**
 * Cut the specified block so that it has the given size, with the remaining data
 * forming a new free block. If the free block would be less than the minimum size,
 * the cut is not performed.
 * @return the next block after the (possibly cut) block.
 */
static inline xlat_cache_block_t xlat_cut_block( xlat_cache_block_t block, int cutsize )
{
    cutsize = (cutsize + 3) & 0xFFFFFFFC; // force word alignment
    assert( cutsize <= block->size );
    if( block->size > cutsize + MIN_TOTAL_SIZE ) {
        int oldsize = block->size;
        block->size = cutsize;
        xlat_cache_block_t next = NEXT(block);
        next->active = 0;
        next->size = oldsize - cutsize - sizeof(struct xlat_cache_block);
        return next;
    } else {
        return NEXT(block);
    }
}

/**
 * Promote a block in temp space (or elsewhere for that matter) to old space.
 *
 * @param block to promote.
 */
static void xlat_promote_to_old_space( xlat_cache_block_t block )
{
    int allocation = -sizeof(struct xlat_cache_block);
    int size = block->size;
    xlat_cache_block_t curr = xlat_old_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = -sizeof(struct xlat_cache_block);
            start_block = curr = xlat_old_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_old_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_old_cache_ptr->size == 0 ) {
        xlat_old_cache_ptr = xlat_old_cache;
    }
}

/**
 * Similarly to the above method, promotes a block to temp space.
 * TODO: Try to combine these - they're nearly identical
 */
void xlat_promote_to_temp_space( xlat_cache_block_t block )
{
    int size = block->size;
    int allocation = -sizeof(struct xlat_cache_block);
    xlat_cache_block_t curr = xlat_temp_cache_ptr;
    xlat_cache_block_t start_block = curr;
    do {
        if( curr->active == BLOCK_USED ) {
            xlat_promote_to_old_space( curr );
        } else if( curr->active == BLOCK_ACTIVE ) {
            // Active but not used, release block
            *((uintptr_t *)curr->lut_entry) &= ((uintptr_t)0x03);
        }
        allocation += curr->size + sizeof(struct xlat_cache_block);
        curr = NEXT(curr);
        if( allocation > size ) {
            break; /* done */
        }
        if( curr->size == 0 ) { /* End-of-cache Sentinel */
            /* Leave what we just released as free space and start again from the
             * top of the cache
             */
            start_block->active = 0;
            start_block->size = allocation;
            allocation = -sizeof(struct xlat_cache_block);
            start_block = curr = xlat_temp_cache;
        }
    } while(1);
    start_block->active = 1;
    start_block->size = allocation;
    start_block->lut_entry = block->lut_entry;
    start_block->recover_table_offset = block->recover_table_offset;
    start_block->recover_table_size = block->recover_table_size;
    *block->lut_entry = &start_block->code;
    memcpy( start_block->code, block->code, block->size );
    xlat_temp_cache_ptr = xlat_cut_block(start_block, size );
    if( xlat_temp_cache_ptr->size == 0 ) {
        xlat_temp_cache_ptr = xlat_temp_cache;
    }
}

/**
 * Returns the next block in the new cache list that can be written to by the
 * translator. If the next block is active, it is evicted first.
 */
xlat_cache_block_t xlat_start_block( sh4addr_t address )
{
    if( xlat_new_cache_ptr->size == 0 ) {
        xlat_new_cache_ptr = xlat_new_cache;
    }

    if( xlat_new_cache_ptr->active ) {
        xlat_promote_to_temp_space( xlat_new_cache_ptr );
    }
    xlat_new_create_ptr = xlat_new_cache_ptr;
    xlat_new_create_ptr->active = 1;
    xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);

    /* Add the LUT entry for the block */
    if( xlat_lut[XLAT_LUT_PAGE(address)] == NULL ) {
        xlat_lut[XLAT_LUT_PAGE(address)] =
            mmap( NULL, XLAT_LUT_PAGE_SIZE, PROT_READ|PROT_WRITE,
                  MAP_PRIVATE|MAP_ANON, -1, 0 );
        memset( xlat_lut[XLAT_LUT_PAGE(address)], 0, XLAT_LUT_PAGE_SIZE );
    }

    if( IS_ENTRY_POINT(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]) ) {
        xlat_cache_block_t oldblock = BLOCK_FOR_CODE(xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)]);
        oldblock->active = 0;
    }

    xlat_lut[XLAT_LUT_PAGE(address)][XLAT_LUT_ENTRY(address)] =
        &xlat_new_create_ptr->code;
    xlat_new_create_ptr->lut_entry = xlat_lut[XLAT_LUT_PAGE(address)] + XLAT_LUT_ENTRY(address);

    return xlat_new_create_ptr;
}

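/**
 * Grow the block currently under construction (xlat_new_create_ptr) until it
 * holds at least newSize bytes, absorbing following blocks (evicting active
 * ones to temp space) or, when the end of the cache is reached, relocating
 * the block back to the start of the new cache.
 * @return the possibly-relocated block under construction.
 */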
xlat_cache_block_t xlat_extend_block( uint32_t newSize )
{
    while( xlat_new_create_ptr->size < newSize ) {
        if( xlat_new_cache_ptr->size == 0 ) {
            /* Migrate to the front of the cache to keep it contiguous */
            xlat_new_create_ptr->active = 0;
            sh4ptr_t olddata = xlat_new_create_ptr->code;
            int oldsize = xlat_new_create_ptr->size;
            int size = oldsize + MIN_BLOCK_SIZE; /* minimum expansion */
            void **lut_entry = xlat_new_create_ptr->lut_entry;
            int allocation = -sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = xlat_new_cache;
            do {
                if( xlat_new_cache_ptr->active ) {
                    xlat_promote_to_temp_space( xlat_new_cache_ptr );
                }
                allocation += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
                xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
            } while( allocation < size );
            xlat_new_create_ptr = xlat_new_cache;
            xlat_new_create_ptr->active = 1;
            xlat_new_create_ptr->size = allocation;
            xlat_new_create_ptr->lut_entry = lut_entry;
            *lut_entry = &xlat_new_create_ptr->code;
            memmove( xlat_new_create_ptr->code, olddata, oldsize );
        } else {
            if( xlat_new_cache_ptr->active ) {
                xlat_promote_to_temp_space( xlat_new_cache_ptr );
            }
            xlat_new_create_ptr->size += xlat_new_cache_ptr->size + sizeof(struct xlat_cache_block);
            xlat_new_cache_ptr = NEXT(xlat_new_cache_ptr);
        }
    }
    return xlat_new_create_ptr;
}

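/**
 * Finalize the block under construction: mark the LUT entries spanned by the
 * translated source range as used (where still empty), then cut the block
 * down to destsize bytes and return the remainder to the free pool.
 */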
void xlat_commit_block( uint32_t destsize, uint32_t srcsize )
{
    void **ptr = xlat_new_create_ptr->lut_entry;
    void **endptr = ptr + (srcsize>>2);
    while( ptr < endptr ) {
        if( *ptr == NULL ) {
            *ptr = XLAT_LUT_ENTRY_USED;
        }
        ptr++;
    }

    xlat_new_cache_ptr = xlat_cut_block( xlat_new_create_ptr, destsize );
}

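/**
 * Remove a block from the cache: mark it inactive and clear its LUT entry.
 */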
void xlat_delete_block( xlat_cache_block_t block )
{
    block->active = 0;
    *block->lut_entry = NULL;
}

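/**
 * Sanity-check a single cache region: walk the chain of blocks up to the
 * end-of-cache sentinel, asserting that sizes and flags are in range and
 * that ptr refers to a block within the chain (or to the sentinel itself).
 */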
void xlat_check_cache_integrity( xlat_cache_block_t cache, xlat_cache_block_t ptr, int size )
{
    int foundptr = 0;
    xlat_cache_block_t tail =
        (xlat_cache_block_t)(((char *)cache) + size - sizeof(struct xlat_cache_block));

    assert( tail->active == 1 );
    assert( tail->size == 0 );
    while( cache < tail ) {
        assert( cache->active >= 0 && cache->active <= 2 );
        assert( cache->size >= 0 && cache->size < size );
        if( cache == ptr ) {
            foundptr = 1;
        }
        cache = NEXT(cache);
    }
    assert( cache == tail );
    assert( foundptr == 1 || tail == ptr );
}

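/**
 * Run the integrity check over all three cache generations.
 */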
void xlat_check_integrity( )
{
    xlat_check_cache_integrity( xlat_new_cache, xlat_new_cache_ptr, XLAT_NEW_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_temp_cache, xlat_temp_cache_ptr, XLAT_TEMP_CACHE_SIZE );
    xlat_check_cache_integrity( xlat_old_cache, xlat_old_cache_ptr, XLAT_OLD_CACHE_SIZE );
}