lxdream.org :: lxdream/src/sh4/mmu.c
filename src/sh4/mmu.c
changeset 569:a1c49e1e8776
prev 561:533f6b478071
next 570:d2893980fbf5
author nkeynes
date Fri Jan 04 11:54:17 2008 +0000
branch lxdream-mmu
permissions -rw-r--r--
last change Bring icache partially into line with the mmu, a little less slow with AT off now.
/**
 * $Id$
 *
 * MMU implementation
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "mem.h"

#define OCRAM_START (0x1C000000>>PAGE_BITS)
#define OCRAM_END   (0x20000000>>PAGE_BITS)

#define ITLB_ENTRY_COUNT 4
#define UTLB_ENTRY_COUNT 64

/* Entry address */
#define TLB_VALID     0x00000100
#define TLB_USERMODE  0x00000040
#define TLB_WRITABLE  0x00000020
#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
#define TLB_SIZE_MASK 0x00000090
#define TLB_SIZE_1K   0x00000000
#define TLB_SIZE_4K   0x00000010
#define TLB_SIZE_64K  0x00000080
#define TLB_SIZE_1M   0x00000090
#define TLB_CACHEABLE 0x00000008
#define TLB_DIRTY     0x00000004
#define TLB_SHARE     0x00000002
#define TLB_WRITETHRU 0x00000001

#define MASK_1K  0xFFFFFC00
#define MASK_4K  0xFFFFF000
#define MASK_64K 0xFFFF0000
#define MASK_1M  0xFFF00000

struct itlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t asid; // Process ID
    uint32_t mask;
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
};

struct utlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t mask; // Page size mask
    uint32_t asid; // Process ID
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
    uint32_t pcmcia; // extra pcmcia data - not used
};

static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static uint32_t mmu_urc;
static uint32_t mmu_urb;
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb();

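/**
 * Return the address mask corresponding to the page-size bits of a TLB
 * entry's flags (1K, 4K, 64K or 1M pages).
 */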
static uint32_t get_mask_for_flags( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    }
}

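/**
 * MMIO read handler for the MMU register block. MMUCR is reassembled on
 * read by merging the locally maintained URC/URB/LRUI counters back into
 * the stored register value; all other registers are returned as stored.
 */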
int32_t mmio_region_MMU_read( uint32_t reg )
{
    switch( reg ) {
    case MMUCR:
	return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
    default:
	return MMIO_READ( MMU, reg );
    }
}

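/**
 * MMIO write handler for the MMU register block. Values are masked to the
 * writable bits of each register. A PTEH write that changes the current
 * ASID invalidates the cached icache mapping, MMUCR.TI invalidates both
 * TLBs, and a CCR write reconfigures the operand-cache RAM mapping.
 */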
void mmio_region_MMU_write( uint32_t reg, uint32_t val )
{
    switch(reg) {
    case PTEH:
	val &= 0xFFFFFCFF;
	if( (val & 0xFF) != mmu_asid ) {
	    mmu_asid = val&0xFF;
	    sh4_icache.page_vma = -1; // invalidate icache as asid has changed
	}
	break;
    case PTEL:
	val &= 0x1FFFFDFF;
	break;
    case PTEA:
	val &= 0x0000000F;
	break;
    case MMUCR:
	if( val & MMUCR_TI ) {
	    mmu_invalidate_tlb();
	}
	mmu_urc = (val >> 10) & 0x3F;
	mmu_urb = (val >> 18) & 0x3F;
	mmu_lrui = (val >> 26) & 0x3F;
	val &= 0x00000301;
	break;
    case CCR:
	mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA) );
	break;
    default:
	break;
    }
    MMIO_WRITE( MMU, reg, val );
}

void MMU_init()
{
    cache = mem_alloc_pages(2);
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
}

void MMU_save_state( FILE *f )
{
    fwrite( cache, 4096, 2, f );
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
}

int MMU_load_state( FILE *f )
{
    /* Setup the cache mode according to the saved register value
     * (mem_load runs before this point to load all MMIO data)
     */
    mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
    if( fread( cache, 4096, 2, f ) != 2 ) {
	return 1;
    }
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
	return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
	return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
	return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) { /* was &mmu_urc, which re-read URC and left URB unrestored */
	return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
	return 1;
    }
    return 0;
}

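/**
 * Remap the page_map entries covering the on-chip OC-RAM region
 * (OCRAM_START..OCRAM_END) onto the emulated operand-cache RAM, using the
 * index scheme selected by CCR.OIX, or unmap the region entirely when
 * OC-RAM mode is disabled.
 */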
void mmu_set_cache_mode( int mode )
{
    uint32_t i;
    switch( mode ) {
        case MEM_OC_INDEX0: /* OIX=0 */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                page_map[i] = cache + ((i&0x02)<<(PAGE_BITS-1));
            break;
        case MEM_OC_INDEX1: /* OIX=1 */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                page_map[i] = cache + ((i&0x02000000)>>(25-PAGE_BITS));
            break;
        default: /* disabled */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                page_map[i] = NULL;
            break;
    }
}

/* TLB maintenance */

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
}

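/**
 * Flush the translation cache (xlat) pages covering the address range
 * mapped by the given UTLB entry, so that previously translated code for
 * those pages is not reused after the mapping changes.
 */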
static inline void mmu_flush_pages( struct utlb_entry *ent )
{
    unsigned int vpn;
    switch( ent->flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: xlat_flush_page( ent->vpn ); break;
    case TLB_SIZE_4K: xlat_flush_page( ent->vpn ); break;
    case TLB_SIZE_64K:
	for( vpn = ent->vpn; vpn < ent->vpn + 0x10000; vpn += 0x1000 ) {
	    xlat_flush_page( vpn );
	}
	break;
    case TLB_SIZE_1M:
	for( vpn = ent->vpn; vpn < ent->vpn + 0x100000; vpn += 0x1000 ) {
	    xlat_flush_page( vpn );
	}
	break;
    }
}

/**
 * The translations are excessively complicated, but unfortunately it's a
 * complicated system. It can undoubtedly be better optimized too.
 */

/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
	mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
	if( (mmu_utlb[i].flags & TLB_VALID) &&
	    ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
	    ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
	    if( result != -1 ) {
		return -2;
	    }
	    result = i;
	}
    }
    return result;
}

/**
 * Perform the actual utlb lookup matching on vpn only
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
	mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
	if( (mmu_utlb[i].flags & TLB_VALID) &&
	    ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
	    if( result != -1 ) {
		return -2;
	    }
	    result = i;
	}
    }

    return result;
}

/**
 * Find a UTLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
	if( ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
	    ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
	    if( result != -1 ) {
		return -2;
	    }
	    result = i;
	}
    }
    return result;
}

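/*
 * Note: the replacement choice below appears to follow the SH7750 ITLB
 * LRUI encoding, where the six LRUI bits record the relative access order
 * of the four ITLB entries; the final else branch also catches LRUI
 * patterns that do not correspond to any valid ordering.
 */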
/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static int inline mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
	replace = 0;
	mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
	replace = 1;
	mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
	replace = 2;
	mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
	replace = 3;
	mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}

/**
 * Perform the actual itlb lookup w/ asid protection
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
	if( (mmu_itlb[i].flags & TLB_VALID) &&
	    ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
	    ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
	    if( result != -1 ) {
		return -2;
	    }
	    result = i;
	}
    }

    if( result == -1 ) {
	int utlbEntry = mmu_utlb_lookup_vpn( vpn );
	if( utlbEntry == -1 ) {
	    return -1;
	} else {
	    return mmu_itlb_update_from_utlb( utlbEntry );
	}
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Perform the actual itlb lookup on vpn only
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
	if( (mmu_itlb[i].flags & TLB_VALID) &&
	    ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
	    if( result != -1 ) {
		return -2;
	    }
	    result = i;
	}
    }

    if( result == -1 ) {
	int utlbEntry = mmu_utlb_lookup_vpn( vpn );
	if( utlbEntry == -1 ) {
	    return -1;
	} else {
	    return mmu_itlb_update_from_utlb( utlbEntry );
	}
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Find an ITLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
	if( ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
	    ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
	    if( result != -1 ) {
		return -2;
	    }
	    result = i;
	}
    }
    return result;
}

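/*
 * Exception-raising helpers. The TLB and memory error macros latch the
 * faulting virtual address into TEA and into the VPN field of PTEH before
 * raising the corresponding SH4 exception; RAISE_OTHER_ERROR raises an
 * address error without touching the MMU registers. These are
 * multi-statement macros, so they must only be expanded where a full
 * statement is expected.
 */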
#define RAISE_TLB_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code);

#define RAISE_MEM_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code);

#define RAISE_OTHER_ERROR(code) \
    sh4_raise_exception(code);

/**
 * Abort with a non-MMU address error. Caused by user-mode code attempting
 * to access privileged regions, or alignment faults.
 */
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)

#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));

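/**
 * Translate a virtual address for a data write, raising the appropriate
 * MMU exception on failure.
 * @return the physical address as a uint64_t, or 0x100000000LL if an
 * exception has been raised.
 */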
uint64_t mmu_vma_to_phys_write( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
	if( IS_SH4_PRIVMODE() ) {
	    if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
		/* P1, P2 and P4 regions are pass-through (no translation) */
		return (uint64_t)addr;
	    }
	} else {
	    if( addr >= 0xE0000000 && addr < 0xE4000000 &&
		((mmucr&MMUCR_SQMD) == 0) ) {
		/* Conditional user-mode access to the store-queue (no translation) */
		return (uint64_t)addr;
	    }
	    MMU_WRITE_ADDR_ERROR();
	    return 0x100000000LL;
	}
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
	return (uint64_t)addr;
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
	entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
	entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
	MMU_TLB_WRITE_MISS_ERROR(addr);
	return 0x100000000LL;
    case -2:
	MMU_TLB_MULTI_HIT_ERROR(addr);
	return 0x100000000LL;
    default:
	if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
	    : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
	    /* protection violation */
	    MMU_TLB_WRITE_PROT_ERROR(addr);
	    return 0x100000000LL;
	}

	if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
	    MMU_TLB_INITIAL_WRITE_ERROR(addr);
	    return 0x100000000LL;
	}

	/* finally generate the target address */
	return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
	    (addr & (~mmu_utlb[entryNo].mask));
    }
    return -1;
}

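/**
 * Translate a virtual address for a data read. Follows the same rules as
 * mmu_vma_to_phys_write, but checks read permission and raises the read
 * variants of the miss/protection exceptions.
 * @return the physical address as a uint64_t, or 0x100000000LL if an
 * exception has been raised.
 */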
uint64_t mmu_vma_to_phys_read( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
	if( IS_SH4_PRIVMODE() ) {
	    if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
		/* P1, P2 and P4 regions are pass-through (no translation) */
		return (uint64_t)addr;
	    }
	} else {
	    if( addr >= 0xE0000000 && addr < 0xE4000000 &&
		((mmucr&MMUCR_SQMD) == 0) ) {
		/* Conditional user-mode access to the store-queue (no translation) */
		return (uint64_t)addr;
	    }
	    MMU_READ_ADDR_ERROR();
	    return 0x100000000LL;
	}
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
	return (uint64_t)addr;
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
	entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
	entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
	MMU_TLB_READ_MISS_ERROR(addr);
	return 0x100000000LL;
    case -2:
	MMU_TLB_MULTI_HIT_ERROR(addr);
	return 0x100000000LL;
    default:
	if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
	    !IS_SH4_PRIVMODE() ) {
	    /* protection violation */
	    MMU_TLB_READ_PROT_ERROR(addr);
	    return 0x100000000LL;
	}

	/* finally generate the target address */
	return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
	    (addr & (~mmu_utlb[entryNo].mask));
    }
    return -1;
}

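/**
 * Invalidate every ITLB and UTLB entry by clearing its valid bit. Invoked
 * when a 1 is written to MMUCR.TI.
 */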
static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
	mmu_itlb[i].flags &= (~TLB_VALID);
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
	mmu_utlb[i].flags &= (~TLB_VALID);
    }
}

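/*
 * Memory-mapped ITLB access: reads and writes to the ITLB address/data
 * arrays (P4 control region) are routed through the handlers below.
 * ITLB_ENTRY extracts the entry index (0-3) from the access address.
 */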
#define ITLB_ENTRY(addr) ((addr>>7)&0x03)

int32_t mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}
int32_t mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->ppn | ent->flags;
}

void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_mask_for_flags(val);
}

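/*
 * Memory-mapped UTLB access. UTLB_ENTRY extracts the entry index (0-63)
 * from the access address, UTLB_ASSOC selects the associative write mode,
 * and UTLB_DATA2 selects the secondary (PCMCIA) data word.
 */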
#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)

int32_t mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
	((ent->flags & TLB_DIRTY)<<7);
}
int32_t mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
	return ent->pcmcia;
    } else {
	return ent->ppn | ent->flags;
    }
}

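/**
 * Write to the UTLB address array. An associative write (UTLB_ASSOC set)
 * searches the UTLB by VPN/ASID and, on a single match, updates only the
 * valid and dirty bits of that entry, raising a multi-hit error if more
 * than one entry matches. A non-associative write updates the VPN, ASID
 * and valid/dirty bits of the indexed entry directly.
 */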
void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
	uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
	int entryNo = mmu_utlb_lookup_assoc( val, asid );
	if( entryNo >= 0 ) {
	    struct utlb_entry *ent = &mmu_utlb[entryNo];
	    ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
	    ent->flags |= (val & TLB_VALID);
	    ent->flags |= ((val & 0x200)>>7);
	} else if( entryNo == -2 ) {
	    MMU_TLB_MULTI_HIT_ERROR(addr);
	}
    } else {
	struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
	ent->vpn = (val & 0xFFFFFC00);
	ent->asid = (val & 0xFF);
	ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
	ent->flags |= (val & TLB_VALID);
	ent->flags |= ((val & 0x200)>>7);
    }
}

void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
	ent->pcmcia = val & 0x0000000F;
    } else {
	ent->ppn = (val & 0x1FFFFC00);
	ent->flags = (val & 0x000001FF);
	ent->mask = get_mask_for_flags(val);
    }
}

/* Cache access - not implemented */

int32_t mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}

/**
 * Update the icache for an untranslated address
 */
void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
	/* Main ram */
	sh4_icache.page_vma = addr & 0xFF000000;
	sh4_icache.page_ppa = 0x0C000000;
	sh4_icache.mask = 0xFF000000;
	sh4_icache.page = sh4_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
	/* BIOS ROM */
	sh4_icache.page_vma = addr & 0xFFE00000;
	sh4_icache.page_ppa = 0;
	sh4_icache.mask = 0xFFE00000;
	sh4_icache.page = mem_get_region(0);
    } else {
	/* not supported */
	sh4_icache.page_vma = -1;
    }
}

/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method will raise TLB exceptions normally (hence it
 * should only be used immediately prior to execution of code), and will set
 * the icache according to the matching TLB entry.
 * If AT is off, this method will set the entire referenced RAM/ROM region in
 * the icache.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
	if( addr & 0x80000000 ) {
	    if( addr < 0xC0000000 ) {
		/* P1, P2 and P4 regions are pass-through (no translation) */
		mmu_update_icache_phys(addr);
		return TRUE;
	    } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
		MMU_READ_ADDR_ERROR();
		return FALSE;
	    }
	} else {
	    MMU_READ_ADDR_ERROR();
	    return FALSE;
	}

	uint32_t mmucr = MMIO_READ(MMU,MMUCR);
	if( (mmucr & MMUCR_AT) == 0 ) {
	    mmu_update_icache_phys(addr);
	    return TRUE;
	}

	entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
	if( addr & 0x80000000 ) {
	    MMU_READ_ADDR_ERROR();
	    return FALSE;
	}

	uint32_t mmucr = MMIO_READ(MMU,MMUCR);
	if( (mmucr & MMUCR_AT) == 0 ) {
	    mmu_update_icache_phys(addr);
	    return TRUE;
	}

	if( mmucr & MMUCR_SV ) {
	    entryNo = mmu_itlb_lookup_vpn( addr );
	} else {
	    entryNo = mmu_itlb_lookup_vpn_asid( addr );
	}
	if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
	    MMU_TLB_READ_PROT_ERROR(addr);
	    return FALSE;
	}
    }

    switch(entryNo) {
    case -1:
	MMU_TLB_READ_MISS_ERROR(addr);
	return FALSE;
    case -2:
	MMU_TLB_MULTI_HIT_ERROR(addr);
	return FALSE;
    default:
	sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
	sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
	if( sh4_icache.page == NULL ) {
	    sh4_icache.page_vma = -1;
	} else {
	    sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
	    sh4_icache.mask = mmu_itlb[entryNo].mask;
	}
	return TRUE;
    }
}