lxdream.org :: lxdream/src/sh4/mmu.c
filename     src/sh4/mmu.c
changeset    669:ab344e42bca9
prev         597:87cbdf62aa35
next         736:a02d1475ccfd
author       nkeynes
date         Wed Jun 25 10:03:28 2008 +0000
permissions  -rw-r--r--
last change  Add sh4_dump_region convenience function
/**
 * $Id$
 *
 * MMU implementation
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "mem.h"

#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)

/* The MMU (practically unique in the system) is allowed to raise exceptions
 * directly, with a return code indicating that one was raised and the caller
 * had better behave appropriately.
 */
#define RAISE_TLB_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code);

#define RAISE_MEM_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code);

#define RAISE_OTHER_ERROR(code) \
    sh4_raise_exception(code);
/**
 * Abort with a non-MMU address error. Caused by user-mode code attempting
 * to access privileged regions, or alignment faults.
 */
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)

#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
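
/* Usage note (illustrative sketch, not part of the original source): the
 * translation functions below signal a raised exception by returning
 * MMU_VMA_ERROR, so a caller is expected to check for it before using the
 * result, e.g.:
 *
 *     sh4addr_t phys = mmu_vma_to_phys_read( vma );
 *     if( phys == MMU_VMA_ERROR ) {
 *         return; // exception state has already been set up by the macros above
 *     }
 *     // ... perform the memory access using phys ...
 */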

#define OCRAM_START (0x1C000000>>PAGE_BITS)
#define OCRAM_END   (0x20000000>>PAGE_BITS)

#define ITLB_ENTRY_COUNT 4
#define UTLB_ENTRY_COUNT 64

/* Entry address */
#define TLB_VALID     0x00000100
#define TLB_USERMODE  0x00000040
#define TLB_WRITABLE  0x00000020
#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
#define TLB_SIZE_MASK 0x00000090
#define TLB_SIZE_1K   0x00000000
#define TLB_SIZE_4K   0x00000010
#define TLB_SIZE_64K  0x00000080
#define TLB_SIZE_1M   0x00000090
#define TLB_CACHEABLE 0x00000008
#define TLB_DIRTY     0x00000004
#define TLB_SHARE     0x00000002
#define TLB_WRITETHRU 0x00000001

#define MASK_1K  0xFFFFFC00
#define MASK_4K  0xFFFFF000
#define MASK_64K 0xFFFF0000
#define MASK_1M  0xFFF00000

struct itlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t asid; // Process ID
    uint32_t mask;
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
};

struct utlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t mask; // Page size mask
    uint32_t asid; // Process ID
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
    uint32_t pcmcia; // extra pcmcia data - not used
};

static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static uint32_t mmu_urc;
static uint32_t mmu_urb;
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb();

static uint32_t get_mask_for_flags( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return 0; /* Unreachable */
    }
}
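
/* Worked example (illustrative, not part of the original source): a 4K page
 * sets TLB_SIZE_4K in its flags, so get_mask_for_flags() returns MASK_4K
 * (0xFFFFF000). Two addresses then hit the same entry exactly when they agree
 * in the bits covered by the mask:
 *
 *     uint32_t mask = get_mask_for_flags( TLB_SIZE_4K );   // == 0xFFFFF000
 *     // 0x0C001234 and 0x0C001FFC lie in the same 4K page,
 *     // 0x0C002000 does not:
 *     ((0x0C001234 ^ 0x0C001FFC) & mask) == 0;   // true
 *     ((0x0C001234 ^ 0x0C002000) & mask) == 0;   // false
 */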

int32_t mmio_region_MMU_read( uint32_t reg )
{
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}

void mmio_region_MMU_write( uint32_t reg, uint32_t val )
{
    uint32_t tmp;
    switch(reg) {
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_asid = val&0xFF;
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( ((val ^ tmp) & MMUCR_AT) && sh4_is_using_xlat() ) {
            // AT flag has changed state - flush the translation cache as all
            // bets are off now. We also need to force an immediate exit from
            // the current block
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_translate_flush_cache();
        }
        break;
    case CCR:
        mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA) );
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}
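
/* Illustrative sketch (not in the original source): MMUCR.URC/URB/LRUI live in
 * the mmu_urc/mmu_urb/mmu_lrui variables rather than in the MMIO backing
 * store, so a guest read of MMUCR is reassembled on the fly:
 *
 *     // guest writes MMUCR with TI set to invalidate the TLB and AT to enable
 *     // address translation
 *     mmio_region_MMU_write( MMUCR, MMUCR_TI | MMUCR_AT );
 *     // a subsequent read folds the counters back into bits 10-15, 18-23 and 26-31
 *     uint32_t mmucr = mmio_region_MMU_read( MMUCR );
 */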

void MMU_init()
{
    cache = mem_alloc_pages(2);
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
}

void MMU_save_state( FILE *f )
{
    fwrite( cache, 4096, 2, f );
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}

int MMU_load_state( FILE *f )
{
    /* Setup the cache mode according to the saved register value
     * (mem_load runs before this point to load all MMIO data)
     */
    mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
    if( fread( cache, 4096, 2, f ) != 2 ) {
        return 1;
    }
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }
    return 0;
}

void mmu_set_cache_mode( int mode )
{
    uint32_t i;
    switch( mode ) {
        case MEM_OC_INDEX0: /* OIX=0 */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                page_map[i] = cache + ((i&0x02)<<(PAGE_BITS-1));
            break;
        case MEM_OC_INDEX1: /* OIX=1 */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                page_map[i] = cache + ((i&0x02000000)>>(25-PAGE_BITS));
            break;
        default: /* disabled */
            for( i=OCRAM_START; i<OCRAM_END; i++ )
                page_map[i] = NULL;
            break;
    }
}

/* TLB maintenance */

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
}
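
/* Illustrative sketch (assumed guest-side sequence, not part of this file): an
 * SH4 TLB miss handler typically loads PTEH/PTEL with the faulting VPN/ASID
 * and the new PPN/flags, then executes LDTLB, which this emulator models as:
 *
 *     mmio_region_MMU_write( PTEH, (vpn & 0xFFFFFC00) | asid );   // vpn/asid are illustrative
 *     mmio_region_MMU_write( PTEL, (ppn & 0x1FFFFC00) | flags );  // ppn/flags are illustrative
 *     MMU_ldtlb();  // copies the registers into mmu_utlb[mmu_urc]
 */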

static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
}

#define ITLB_ENTRY(addr) ((addr>>7)&0x03)

int32_t mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}
int32_t mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->ppn | ent->flags;
}

void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x00001DA;
    ent->mask = get_mask_for_flags(val);
}

#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)
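
/* Illustrative note (not in the original source): the UTLB is exposed as a
 * memory-mapped array, and these macros pick the pieces out of the access
 * address - bits 8..13 select one of the 64 entries, bit 7 requests an
 * associative write, and bit 23 selects the DATA2 (PTEA/pcmcia) array:
 *
 *     uint32_t addr = (12 << 8) | 0x80;        // illustrative address
 *     UTLB_ENTRY(addr);                        // == 12
 *     UTLB_ASSOC(addr);                        // non-zero -> associative write
 *     UTLB_DATA2(addr | 0x00800000);           // non-zero -> DATA2 array
 */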

int32_t mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
        ((ent->flags & TLB_DIRTY)<<7);
}
int32_t mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return ent->ppn | ent->flags;
    }
}

/**
 * Find a UTLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Find an ITLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
        }

        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return;
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
    }
}

void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_mask_for_flags(val);
    }
}

/* Cache access - not implemented */

int32_t mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}

/******************************************************************************/
/*                        MMU TLB address translation                         */
/******************************************************************************/

/**
 * The translations are excessively complicated, but unfortunately it's a
 * complicated system. TODO: make this not be painfully slow.
 */

/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Perform the actual utlb lookup matching on vpn only
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}

/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}
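
/* Worked example (illustrative, not part of the original source): LRUI is a
 * 6-bit set of pairwise "older than" bits among the four ITLB entries, which
 * is why the tests above use masks rather than a simple counter. If
 * mmu_lrui == 0x38 (binary 111000), entry 0 is the least recently used, so it
 * is replaced and its bits are cleared:
 *
 *     mmu_lrui = 0x38;
 *     int replaced = mmu_itlb_update_from_utlb( someUtlbEntry ); // someUtlbEntry is illustrative
 *     // replaced == 0, and mmu_lrui has been reduced to 0x00 (0x38 & 0x07)
 */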

/**
 * Perform the actual itlb lookup w/ asid protection
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn_asid( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Perform the actual itlb lookup on vpn only
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

sh4addr_t mmu_vma_to_phys_read( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_READ_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
            !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            MMU_TLB_READ_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
            (addr & (~mmu_utlb[entryNo].mask));
    }
}
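
/* Illustrative summary (not part of the original source): the tests on the top
 * address bits above correspond to the SH4 memory map, roughly:
 *
 *     // P0/U0: 0x00000000-0x7FFFFFFF  translated when MMUCR.AT is set
 *     // P1:    0x80000000-0x9FFFFFFF  privileged, pass-through
 *     // P2:    0xA0000000-0xBFFFFFFF  privileged, pass-through
 *     // P3:    0xC0000000-0xDFFFFFFF  privileged, translated
 *     // P4:    0xE0000000-0xFFFFFFFF  privileged control space (the store
 *     //        queues at 0xE0000000-0xE3FFFFFF are also reachable from user
 *     //        mode when MMUCR.SQMD is clear)
 */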

sh4addr_t mmu_vma_to_phys_write( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_WRITE_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
            : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
            (addr & (~mmu_utlb[entryNo].mask));
    }
}

/**
 * Update the icache for an untranslated address
 */
void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = sh4_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = mem_get_region(0);
    } else {
        /* not supported */
        sh4_icache.page_vma = -1;
    }
}

/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method may raise TLB exceptions normally (hence it should
 * only be used immediately prior to executing code), and on success sets the
 * icache according to the matching TLB entry.
 * If AT is off, this method will set the entire referenced RAM/ROM region in
 * the icache.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                MMU_READ_ADDR_ERROR();
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            MMU_READ_ADDR_ERROR();
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( mmucr & MMUCR_SV ) {
            entryNo = mmu_itlb_lookup_vpn( addr );
        } else {
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        }
        if( entryNo >= 0 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            MMU_TLB_READ_PROT_ERROR(addr);
            return FALSE;
        }
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}
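
/* Usage sketch (illustrative, not part of the original source; sh4r.pc is an
 * assumed field of the core register block): the instruction fetch path is
 * expected to refresh the icache mapping before fetching from a new page,
 * along the lines of:
 *
 *     if( !mmu_update_icache( sh4r.pc ) ) {
 *         return; // an exception was raised; let the exception handler run
 *     }
 *     // sh4_icache.page + (sh4r.pc & ~sh4_icache.mask) now addresses the opcode
 */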

/**
 * Translate address for disassembly purposes (ie performs an instruction
 * lookup) - does not raise exceptions or modify any state, and ignores
 * protection bits. Returns the translated address, or MMU_VMA_ERROR
 * on translation failure.
 */
sh4addr_t mmu_vma_to_phys_disasm( sh4vma_t vma )
{
    if( vma & 0x80000000 ) {
        if( vma < 0xC0000000 ) {
            /* P1, P2 and P4 regions are pass-through (no translation) */
            return VMA_TO_EXT_ADDR(vma);
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
            /* Not translatable */
            return MMU_VMA_ERROR;
        }
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(vma);
    }

    int entryNo = mmu_itlb_lookup_vpn( vma );
    if( entryNo == -2 ) {
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
    }
    if( entryNo < 0 ) {
        return MMU_VMA_ERROR;
    } else {
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
            (vma & (~mmu_itlb[entryNo].mask));
    }
}

gboolean sh4_flush_store_queue( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    int queue = (addr&0x20)>>2;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target;
    /* Store queue operation */
    if( mmucr & MMUCR_AT ) {
        int entryNo;
        if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
            entryNo = mmu_utlb_lookup_vpn_asid( addr );
        } else {
            entryNo = mmu_utlb_lookup_vpn( addr );
        }
        switch(entryNo) {
        case -1:
            MMU_TLB_WRITE_MISS_ERROR(addr);
            return FALSE;
        case -2:
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return FALSE;
        default:
            if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
                /* protection violation */
                MMU_TLB_WRITE_PROT_ERROR(addr);
                return FALSE;
            }

            if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
                MMU_TLB_INITIAL_WRITE_ERROR(addr);
                return FALSE;
            }

            /* finally generate the target address */
            target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                      (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
        }
    } else {
        uint32_t hi = (MMIO_READ( MMU, (queue == 0 ? QACR0 : QACR1) ) & 0x1C) << 24;
        target = (addr&0x03FFFFE0) | hi;
    }
    mem_copy_to_sh4( target, src, 32 );
    return TRUE;
}
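
/* Usage sketch (illustrative, describing assumed guest-side behaviour rather
 * than code in this file): SH4 software fills one of the two 32-byte store
 * queues through the 0xE0000000-0xE3FFFFFF window and then issues a PREF
 * instruction on that address, which the emulator services with
 * sh4_flush_store_queue():
 *
 *     // guest: MOV.L writes to 0xE0000000..0xE000001C fill store queue 0,
 *     // then PREF on 0xE0000000 triggers the 32-byte burst write.
 *     // emulator: the PREF handler calls
 *     sh4_flush_store_queue( 0xE0000000 );
 *     // which translates the SQ address (or combines it with QACR0/QACR1 when
 *     // the MMU is off) and copies the 32 bytes with mem_copy_to_sh4().
 */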